| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # Passing a different number of gradients than on the first call must fail.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
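For orientation, here is a minimal sketch (not part of the test file above) of how `GradientAccumulator` and `create_optimizer` are typically combined in a custom training loop. The toy Keras model, the random batches, and the 4-micro-batch accumulation schedule are assumptions for illustration only:

import tensorflow as tf
from transformers import GradientAccumulator, create_optimizer

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.build(input_shape=(None, 2))
accumulator = GradientAccumulator()
optimizer, _ = create_optimizer(init_lr=5e-5, num_train_steps=100, num_warmup_steps=10)

for step in range(8):
    x = tf.random.normal((4, 2))
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(model(x) ** 2)
    # accumulate the gradients of this micro-batch
    accumulator(tape.gradient(loss, model.trainable_variables))
    if (step + 1) % 4 == 0:  # apply once every 4 micro-batches, then reset
        optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        accumulator.reset()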
[code_codestyle: 50]
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    """
    Construct a MBart50 tokenizer, based on SentencePiece. Source and target
    sequences are both wrapped as `[lang_code] tokens </s>`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
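A brief usage sketch for the tokenizer above, assuming the published `facebook/mbart-large-50-one-to-many-mmt` checkpoint is available:

from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
model_inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# input_ids come out as [en_XX code] ... [</s>], per set_src_lang_special_tokens above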
[style_context_codestyle: 50 | label: 1]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
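A short sketch of the behaviour this lazy `__init__` is meant to provide (assuming a transformers version that ships gpt_bigcode): the config import resolves without pulling in the torch-backed modeling file, which is only loaded when a modeling class is first accessed.

from transformers import GPTBigCodeConfig  # cheap: modeling_gpt_bigcode is not imported yet

config = GPTBigCodeConfig()  # default configuration

# First access to a modeling class triggers the lazy load of modeling_gpt_bigcode:
from transformers import GPTBigCodeModel

model = GPTBigCodeModel(config)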
[code_codestyle: 50]
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
[style_context_codestyle: 50 | label: 1]
"""
Highly divisible triangular number (Project Euler Problem 12):
find the value of the first triangle number to have over five hundred divisors.
"""


def triangle_number_generator():
    """Generate the triangle number series: n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
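As a quick sanity check of `count_divisors` (not in the original file): 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors; the second assertion uses the well-known answer to this Project Euler problem.

assert count_divisors(28) == 6
assert count_divisors(76576500) == 576  # first triangle number whose divisor count exceeds 500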
[code_codestyle: 50]
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to travel on all given days using 1-day, 7-day and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
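A usage example matching the classic problem this mirrors: for the travel days below, the cheapest cover costs 11 (one 7-day pass covering days 1 through 8, plus two 1-day passes are beaten by a 7-day pass at 7 plus two 1-day passes at 2 each).

print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11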
[style_context_codestyle: 50 | label: 1]
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
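A usage sketch inferred from the argparse flags above; the `utils/release.py` path is an assumption about where the script lives in the repository:

python utils/release.py                  # pre-release work: bump the minor version
python utils/release.py --patch          # pre-release work for a patch release
python utils/release.py --post_release   # after a release: move back to a .dev0 version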
[code_codestyle: 50]
[style_context: verbatim duplicate of the gpt_bigcode lazy-import __init__ above | style_context_codestyle: 50 | label: 1]
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K",
        "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>",
    )
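A minimal sketch of constructing the config above; the small hyperparameter values are illustrative, not canonical:

from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
print(config.to_dict()["hidden_size"])  # 320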
[code_codestyle: 50]
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove path segments: positive values shave the first segments, negative the last ones."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Update old resnet key paths to the new naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Update old attention key paths to the new naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )

    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
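A sketch of using the converter in-process instead of via the CLI; the input paths below are hypothetical placeholders for an LDM checkpoint and its matching UNet config:

import json
import torch

checkpoint = torch.load("model.ckpt")   # hypothetical checkpoint path
with open("config.json") as f:          # hypothetical UNet config
    config = json.load(f)

converted = convert_ldm_checkpoint(checkpoint, config)
print(len(converted), "tensors remapped to the diffusers naming scheme")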
[style_context_codestyle: 50 | label: 1]
def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
[code_codestyle: 50]
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
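A corresponding usage sketch; `DPRQuestionEncoder` is one of the model classes this config drives, and its use here is illustrative:

from transformers import DPRConfig, DPRQuestionEncoder

config = DPRConfig(projection_dim=128)  # add a 128-d projection on top of the pooled output
model = DPRQuestionEncoder(config)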
[style_context_codestyle: 50 | label: 1]
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Check that the BetterTransformer conversion and its reversal round-trip correctly."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must raise while in BetterTransformer mode and succeed once reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
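The round-trip the tests above exercise, as a standalone sketch (assumes `optimum` is installed; the output directory is hypothetical):

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in fused-attention modules
# ... run inference ...
model = model.reverse_bettertransformer()   # restore the canonical layout before serializing
model.save_pretrained("./tiny-t5-checkpoint")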
[code_codestyle: 50]
"""
Training the distilled model.
Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2.
"""
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset

from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """A bunch of args sanity checks to perform before starting the training."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 500
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase ) as mock_head:
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def a__( self : int )-> Tuple:
"""simple docstring"""
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 500
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase ) as mock_head:
UpperCAmelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check ensures we did call the fake head request
mock_head.assert_called()
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
try:
UpperCAmelCase = tempfile.mktemp()
with open(lowerCAmelCase , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowerCAmelCase )
UpperCAmelCase = AlbertTokenizer.from_pretrained(lowerCAmelCase )
finally:
os.remove(lowerCAmelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowerCAmelCase )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class UpperCamelCase__( unittest.TestCase ):
__magic_name__ : List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def a__( cls : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def a__( cls : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizer(lowerCAmelCase )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase , repo_id='''test-tokenizer''' , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizer(lowerCAmelCase )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCAmelCase , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def a__( self : int )-> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = CustomTokenizer(lowerCAmelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizerFast.from_pretrained(lowerCAmelCase )
bert_tokenizer.save_pretrained(lowerCAmelCase )
UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowerCAmelCase )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
UpperCAmelCase = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=lowerCAmelCase , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def a__( self : Optional[int] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Trie()
UpperCAmelCase = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCAmelCase , ['''AB''', '''C'''] )
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = 3
UpperCAmelCase = (32, 32)
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase )
return image
@property
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def a__( self : Any )-> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def a__( self : Dict )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowerCAmelCase )
@property
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
def extract(*lowerCAmelCase : str , **lowerCAmelCase : Tuple ):
class UpperCamelCase__:
def __init__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = torch.ones([0] )
def a__( self : Dict , lowerCAmelCase : str )-> str:
"""simple docstring"""
self.pixel_values.to(lowerCAmelCase )
return self
return Out()
return extract
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet
UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet
UpperCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=lowerCAmelCase )
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert isinstance(pipe.scheduler , lowerCAmelCase )
assert pipe.safety_checker is None
UpperCAmelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
UpperCAmelCase = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.dummy_cond_unet
UpperCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
UpperCAmelCase = unet.half()
UpperCAmelCase = vae.half()
UpperCAmelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase )
UpperCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
UpperCAmelCase = 4003660346
UpperCAmelCase = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase )
UpperCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''padme amidala taking a bath artwork, safe for work, no nudity'''
UpperCAmelCase = 2734971755
UpperCAmelCase = 7
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
UpperCAmelCase = 1044355234
UpperCAmelCase = 12
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase__ ( A : Dict , A : Any ):
'''simple docstring'''
UpperCAmelCase = []
for part_id in partition_order:
UpperCAmelCase = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(100 ).repartition(1 )
UpperCAmelCase = Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
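# Sketch of the arithmetic behind the assertion below: 100 rows * 8 bytes/row
# = 800 bytes total, and 800 / 16 bytes per shard = 50 partitions.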
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(10 ).repartition(2 )
UpperCAmelCase = [1, 0]
UpperCAmelCase = _generate_iterable_examples(A , A ) # Reverse the partitions.
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(10 ).repartition(1 )
UpperCAmelCase = SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
UpperCAmelCase = lambda A : x.reverse()
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
UpperCAmelCase = SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCAmelCase = SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCAmelCase = SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(100 ).repartition(1 )
UpperCAmelCase = Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
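# Illustrative example (assumed values, not executed here): with mixed
# precision "fp16", pad_to_multiple_of is 8, so a batch whose longest sentence
# has 61 tokens is padded to 64; on TPU every batch is padded to a fixed 128.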
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
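# Illustrative: a configured batch_size of 64 with MAX_GPU_BATCH_SIZE=16 yields
# gradient_accumulation_steps=4 and a per-step batch_size of 16, so the
# effective optimizer batch size is still 64.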
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=100 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=A , default='''logs''' , help='''Location of where to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( A : Optional[Any] , A : Any , A : List[str] ):
'''simple docstring'''
if isinstance(A , torch.Tensor ):
return image
elif isinstance(A , PIL.Image.Image ):
UpperCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCAmelCase = np.concatenate(A , axis=0 )
UpperCAmelCase = np.array(A ).astype(np.floataa ) / 255.0
UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 )
UpperCAmelCase = 2.0 * image - 1.0
UpperCAmelCase = torch.from_numpy(A )
elif isinstance(image[0] , torch.Tensor ):
UpperCAmelCase = torch.cat(A , dim=0 )
return image
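# Note: after the / 255.0 rescale, the 2.0 * image - 1.0 step above maps pixel
# values from [0, 1] into the [-1, 1] range the VAE encoder expects.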
def lowerCamelCase__ ( A : str , A : str , A : List[Any] , A : Optional[int]=0.9995 ):
'''simple docstring'''
if not isinstance(A , np.ndarray ):
UpperCAmelCase = True
UpperCAmelCase = va.device
UpperCAmelCase = va.cpu().numpy()
UpperCAmelCase = va.cpu().numpy()
UpperCAmelCase = np.sum(va * va / (np.linalg.norm(A ) * np.linalg.norm(A )) )
if np.abs(A ) > DOT_THRESHOLD:
UpperCAmelCase = (1 - t) * va + t * va
else:
UpperCAmelCase = np.arccos(A )
UpperCAmelCase = np.sin(A )
UpperCAmelCase = theta_a * t
UpperCAmelCase = np.sin(A )
UpperCAmelCase = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCAmelCase = sin_theta_t / sin_theta_a
UpperCAmelCase = sa * va + sa * va
if inputs_are_torch:
UpperCAmelCase = torch.from_numpy(A ).to(A )
return va
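# Usage sketch for the spherical interpolation above (illustrative tensors;
# the pipeline refers to this function as `slerp` below):
#   va, vb = torch.randn(768), torch.randn(768)
#   mid = slerp(0.5, va, vb)  # halfway along the great circle between va and vb
# When the inputs are nearly parallel (dot > DOT_THRESHOLD) it falls back to
# plain linear interpolation.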
def lowerCamelCase__ ( A : Optional[Any] , A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = F.normalize(A , dim=-1 )
UpperCAmelCase = F.normalize(A , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
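# Math note: for unit vectors x and y, ||x - y|| = 2 * sin(theta / 2), so
# .div(2).arcsin() recovers theta / 2 and the returned loss equals
# theta ** 2 / 2, i.e. half the squared geodesic angle between the embeddings.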
def lowerCamelCase__ ( A : Union[str, Any] , A : Any ):
'''simple docstring'''
for param in model.parameters():
UpperCAmelCase = value
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Union[str, Any] , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowerCAmelCase : CLIPFeatureExtractor , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Tuple=None , )-> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , clip_model=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , feature_extractor=lowerCAmelCase , coca_model=lowerCAmelCase , coca_tokenizer=lowerCAmelCase , coca_transform=lowerCAmelCase , )
UpperCAmelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCAmelCase )
else feature_extractor.size['''shortest_edge''']
)
UpperCAmelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCAmelCase )
set_requires_grad(self.clip_model , lowerCAmelCase )
def a__( self : Tuple , lowerCAmelCase : Optional[Union[str, int]] = "auto" )-> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
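# Illustrative: with unet.config.attention_head_dim == 8 this passes
# slice_size=4, trading some speed for a lower peak memory footprint during
# attention (assumed config value, not from this file).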
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase )
def a__( self : str )-> str:
"""simple docstring"""
set_requires_grad(self.vae , lowerCAmelCase )
def a__( self : List[Any] )-> int:
"""simple docstring"""
set_requires_grad(self.vae , lowerCAmelCase )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
set_requires_grad(self.unet , lowerCAmelCase )
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
set_requires_grad(self.unet , lowerCAmelCase )
def a__( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowerCAmelCase )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
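# Worked example (illustrative): num_inference_steps=50 with strength=0.6
# gives init_timestep=30 and t_start=20, so denoising resumes from the 21st
# scheduler timestep and runs the remaining 30 steps.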
def a__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]=None )-> List[str]:
"""simple docstring"""
if not isinstance(lowerCAmelCase , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase )}""" )
UpperCAmelCase = image.to(device=lowerCAmelCase , dtype=lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase )
]
UpperCAmelCase = torch.cat(lowerCAmelCase , dim=0 )
else:
UpperCAmelCase = self.vae.encode(lowerCAmelCase ).latent_dist.sample(lowerCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase = 0.18215 * init_latents
UpperCAmelCase = init_latents.repeat_interleave(lowerCAmelCase , dim=0 )
UpperCAmelCase = randn_tensor(init_latents.shape , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
# get latents
UpperCAmelCase = self.scheduler.add_noise(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = init_latents
return latents
def a__( self : List[str] , lowerCAmelCase : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.coca_transform(lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCAmelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCAmelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.feature_extractor.preprocess(lowerCAmelCase )
UpperCAmelCase = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCAmelCase = self.clip_model.get_image_features(lowerCAmelCase )
UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase )
UpperCAmelCase = image_embeddings_clip.repeat_interleave(lowerCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , )-> Tuple:
"""simple docstring"""
UpperCAmelCase = latents.detach().requires_grad_()
UpperCAmelCase = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
# predict the noise residual
UpperCAmelCase = self.unet(lowerCAmelCase , lowerCAmelCase , encoder_hidden_states=lowerCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCAmelCase = self.scheduler.alphas_cumprod[timestep]
UpperCAmelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCAmelCase = torch.sqrt(lowerCAmelCase )
UpperCAmelCase = pred_original_sample * (fac) + latents * (1 - fac)
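# i.e. x0_hat = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t), and
# the sample fed to the VAE blends x0_hat and x_t with weight
# fac = sqrt(beta_prod_t) = sqrt(1 - alpha_bar_t).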
elif isinstance(self.scheduler , lowerCAmelCase ):
UpperCAmelCase = self.scheduler.sigmas[index]
UpperCAmelCase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase = 1 / 0.18215 * sample
UpperCAmelCase = self.vae.decode(lowerCAmelCase ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase )
UpperCAmelCase = self.normalize(lowerCAmelCase ).to(latents.dtype )
UpperCAmelCase = self.clip_model.get_image_features(lowerCAmelCase )
UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase )
UpperCAmelCase = spherical_dist_loss(lowerCAmelCase , lowerCAmelCase ).mean() * clip_guidance_scale
UpperCAmelCase = -torch.autograd.grad(lowerCAmelCase , lowerCAmelCase )[0]
if isinstance(self.scheduler , lowerCAmelCase ):
UpperCAmelCase = latents.detach() + grads * (sigma**2)
UpperCAmelCase = noise_pred_original
else:
UpperCAmelCase = noise_pred_original - torch.sqrt(lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[int] = 512 , lowerCAmelCase : Optional[int] = 512 , lowerCAmelCase : float = 0.6 , lowerCAmelCase : Optional[int] = 50 , lowerCAmelCase : Optional[float] = 7.5 , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[float] = 100 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : float = 0.8 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , )-> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(lowerCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(lowerCAmelCase , torch.Generator ) and batch_size > 1:
UpperCAmelCase = [generator] + [None] * (batch_size - 1)
UpperCAmelCase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCAmelCase = [x[0] for x in coca_is_none if x[1]]
UpperCAmelCase = ''', '''.join(lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCAmelCase ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
UpperCAmelCase = self.get_image_description(lowerCAmelCase )
if style_prompt is None:
if len(lowerCAmelCase ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
UpperCAmelCase = self.get_image_description(lowerCAmelCase )
# get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
# set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
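# Usage sketch for the pipeline above (a hedged example, not part of the original
# file; the checkpoint and community-pipeline name are illustrative assumptions):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#     )
#     out = pipe(style_image=style_img, content_image=content_img, num_inference_steps=50)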
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
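# Note on the pattern above: _LazyModule replaces this module in sys.modules so
# that heavy submodules are only imported on first attribute access. A minimal
# sketch of the observable behavior (illustrative, not the _LazyModule source):
#
#     import transformers
#     model_cls = transformers.XmodModel  # modeling_xmod is imported only here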
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
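# Worked example for the derived properties above (a sketch; the numbers follow
# directly from the defaults and are easy to verify by hand):
#
#     config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#     config.chunk_length    # int(1.0 * 24000) = 24000 samples
#     config.chunk_stride    # max(1, int(0.99 * 24000)) = 23760
#     config.frame_rate      # ceil(24000 / (8 * 5 * 4 * 2)) = ceil(24000 / 320) = 75
#     config.num_quantizers  # int(1000 * 24.0 // (75 * 10)) = 32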
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) row by row with Pascal's rule, using O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
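# Sanity check: C(10, 5) = 10! / (5! * 5!) = 252, so the call above prints 252.
assert binomial_coefficient(n=10, r=5) == 252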
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-algorithm for the minimum vertex cover problem."""
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Cleans the table of content by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
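# Illustration of what clean_doc_toc does on a tiny list (the entries below are
# made up for the example):
#
#     clean_doc_toc([
#         {"local": "overview", "title": "Overview"},
#         {"local": "zeta", "title": "Zeta"},
#         {"local": "alpha", "title": "Alpha"},
#     ])
#     # -> the "Overview" entry stays first, the rest is sorted by title:
#     # [{"local": "overview", "title": "Overview"},
#     #  {"local": "alpha", "title": "Alpha"},
#     #  {"local": "zeta", "title": "Zeta"}]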
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
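# Quick illustration of how an entry in REPLACE_PATTERNS rewrites a version line
# (a self-contained sketch; the sample version strings are made up):
#
#     re_pattern, replace = REPLACE_PATTERNS["init"]
#     re_pattern.sub(replace.replace("VERSION", "4.28.0"), '__version__ = "4.28.0.dev0"\n')
#     # -> '__version__ = "4.28.0"\n'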
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn Bunch into (features, target)."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor and predict targets for the test features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """Fetch the California housing dataset, then train and evaluate the regressor."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
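# For context, the criteria exercised above boil down to simple predicates over
# the running sequence; a minimal sketch of MaxLengthCriteria (illustrative, not
# the transformers source):
#
#     class MaxLengthCriteria(StoppingCriteria):
#         def __call__(self, input_ids, scores, **kwargs) -> bool:
#             return input_ids.shape[-1] >= self.max_length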
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
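# Example invocation (the script name and paths are illustrative placeholders):
#
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bigbird/model.ckpt \
#         --big_bird_config_file bigbird/config.json \
#         --pytorch_dump_path ./bigbird_pytorch \
#         --is_trivia_qa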
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return the list of all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers not exceeding limit (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
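# Sanity check for the sieve helper (hand-verifiable):
#
#     prime_sieve(10) == [2, 3, 5, 7]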
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # token_ids must be a plain list of integers; tensors are rejected.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One candidate sequence must not be a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
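# Context for the tests above (a summary, not code from transformers):
# DisjunctiveConstraint is satisfied once any one of the candidate token
# sequences has been generated; update(token_id) advances a trie built from
# token_ids and reports the (stepped, completed, reset) flags asserted on here.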
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively, with results memoized by lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
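# Worked example: factorial(5) expands to 5 * 4 * 3 * 2 * 1 = 120; thanks to
# @lru_cache, a later call such as factorial(6) reuses the cached factorial(5)
# and performs only one extra multiplication.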
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
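# Example CLI usage (the model name and cache path are illustrative):
#
#     transformers-cli download bert-base-uncased --cache-dir ~/.cache/my_models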
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
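# Worked example: for "AB", 'B' contributes 2 * 26**0 = 2 and 'A' contributes
# 1 * 26**1 = 26, so excel_title_to_column("AB") == 28.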
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard( maximum_memory_bytes : int = None ):
    '''simple docstring'''
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['''OMP_NUM_THREADS'''] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__['''help'''] = None
    import sys
    sys.modules['''ipdb'''] = None
    sys.modules['''joblib'''] = None
    sys.modules['''resource'''] = None
    sys.modules['''psutil'''] = None
    sys.modules['''tkinter'''] = None
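# Hedged usage sketch (added; not part of the original file). Given the helpers above,
# a single candidate program can be scored like this; the program string and the ids
# are made-up placeholders:
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(demo_program , timeout=3.0 , task_id='''demo/0''' , completion_id=0 ) )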
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    test_onnx = False
    def setUp( self )-> None:
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
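# Hedged note (added): with the real names restored above, the suite can be run with
# ``python -m pytest <this_test_file>``; the @slow integration test is skipped unless
# RUN_SLOW=1 is set in the environment, which is transformers' usual convention.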
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess( image ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('''RGB''' ) ) for img in image]
    image = torch.stack(image )
    return image
class DDIMNoiseComparativeAnalysisPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler )-> None:
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('''add noise to latents at timestep''' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image : Union[torch.FloatTensor, PIL.Image.Image] = None , strength : float = 0.8 , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , )-> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
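# Hedged usage sketch (added; not part of the original file). It assumes the class
# above is the community DDIM noise-comparative-analysis pipeline and that a
# DDPM/DDIM-compatible checkpoint such as "google/ddpm-ema-celebahq-256" is available:
#     pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     out, latent_timestep = pipe(image=PIL.Image.open("face.png"), strength=0.5, return_dict=False)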
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-50-one-to-many-mmt""": (
            """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class MBart50Tokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens : List[int] = []
    suffix_tokens : List[int] = []
    def __init__( self , vocab_file , src_lang=None , tgt_lang=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , )-> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs['''additional_special_tokens'''] = kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def vocab_size( self )-> int:
        """simple docstring"""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self )-> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang : str )-> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self )-> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d : Dict )-> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_vocab( self )-> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str )-> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token : str )-> int:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index : int )-> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens : List[str] )-> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None )-> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False )-> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs )-> Optional[Any]:
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "en_XX" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro_RO" , **kwargs , )-> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang : str )-> None:
        """simple docstring"""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang : str )-> None:
        """simple docstring"""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
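# Hedged usage sketch (added; not part of the original file), assuming the class above
# is exposed as MBart50Tokenizer and the checkpoint is available on the Hub:
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# The encoding then starts with the en_XX language-code id and ends with </s>.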
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig( PretrainedConfig ):
    model_type = "mgp-str"
    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50257 , num_wordpiece_labels=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs , )-> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
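# Hedged usage sketch (added; not part of the original file): the defaults above match
# the alibaba-damo/mgp-str-base architecture, and single fields can be overridden.
if __name__ == "__main__":
    config = MgpstrConfig(max_token_length=27 )
    print(config.model_type , config.hidden_size )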
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
    '''simple docstring'''
    set_seed(3 )
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=10_26 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    model = load_gpt2('''gpt2''' ).to(device )
    print('''computing perplexity on objective set''' )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print('''perplexity on objective set:''' , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ):
    '''simple docstring'''
    set_seed(42 )
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=1_00 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    '''simple docstring'''
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=str , default=None , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=str , default=None , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=int , default=None , help='''A seed for reproducible training.''' )
    parser.add_argument(
        '''--context_len''' , default=32 , type=int , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=1_00 , type=int , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=1_00 , type=int , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=10_00 , type=int , help='''To calculate training epochs''' )
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=1_28 , type=int , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=16 , type=int , help='''batch size of training data of language model(gpt2) ''' )
    parser.add_argument(
        '''--eval_interval''' , default=10 , type=int , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=1_00 , type=int , help='''The number of examples split to be used as objective_set/test_data''' )
    parser.add_argument(
        '''--min_len''' , default=10_26 , type=int , help='''The minimum length of the article to be used as objective set''' )
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=15 , type=int , help='''number of epochs to train secondary learner''' )
    parser.add_argument('''--trim''' , default=True , type=bool , help='''truncate the example if it exceeds context length''' )
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=float , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=str , help='''finetuned_model_name''' )
    parser.add_argument(
        '''--recopy_model''' , default=recopy_gpt2 , type=str , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=True , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load('''data/IGF_values.jbl''' )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained('''gpt2''' )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
'''simple docstring'''
import functools
def mincost_tickets( days : list[int] , costs : list[int] ):
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 3_66:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
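    # Hedged example (added; not in the original file), assuming the function above is
    # named mincost_tickets: for travel days [1, 4, 6, 7, 8, 20] and pass costs
    # [2, 7, 15] (1-day, 7-day, 30-day), the cheapest total is 11.
    assert mincost_tickets([1, 4, 6, 7, 8, 20] , [2, 7, 15] ) == 11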
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator( ) -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution( n : int = 10_00 ) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
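    # Hedged check (added; not in the original file): this is Project Euler problem 25;
    # the first Fibonacci term with 1000 digits is term 4782, so solution() == 4782.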
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch( datasets.Metric ):
    def _info( self ):
        """simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments( path , n_shave_prefix_segments=1 ):
    '''simple docstring'''
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    '''simple docstring'''
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['''query''']] = query.reshape(target_shape )
            checkpoint[path_map['''key''']] = key.reshape(target_shape )
            checkpoint[path_map['''value''']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['''new''']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
        new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
        new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['''old''']]
def convert_ldm_checkpoint( checkpoint , config ):
    '''simple docstring'''
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
        resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                '''old''': f"""input_blocks.{i}.1""",
                '''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            to_split = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        '''middle_block.1.qkv.bias''': {
            '''key''': '''mid_block.attentions.0.key.bias''',
            '''query''': '''mid_block.attentions.0.query.bias''',
            '''value''': '''mid_block.attentions.0.value.bias''',
        },
        '''middle_block.1.qkv.weight''': {
            '''key''': '''mid_block.attentions.0.key.weight''',
            '''query''': '''mid_block.attentions.0.query.weight''',
            '''value''': '''mid_block.attentions.0.value.weight''',
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    '''old''': f"""output_blocks.{i}.1""",
                    '''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                to_split = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i] , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
                new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
        vqvae = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
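# A hedged illustration, separate from the script above: the qkv meta-paths it
# builds split a fused attention tensor into query/key/value pieces. Minimal
# sketch of that split, assuming the fused weight stacks q, k and v along
# dim 0 as in the original LDM checkpoints:
import torch


def split_qkv_sketch(qkv_weight: torch.Tensor) -> dict:
    query, key, value = torch.chunk(qkv_weight, 3, dim=0)  # (3C, C) -> 3 x (C, C)
    return {"query": query, "key": key, "value": value}


_parts = split_qkv_sketch(torch.randn(12, 4))
assert _parts["query"].shape == (4, 4)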
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = 2
UpperCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(A )
if n > 1:
factors.append(A )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
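# The snippet above keeps this corpus's placeholder-identifier convention; here
# is the same trial-division factorisation reconstructed with explicit names
# (a hedged sketch, not the original code):
def prime_factors_sketch(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)  # i divides n, so i is a prime factor
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors


assert prime_factors_sketch(315) == [3, 3, 5, 7]
assert prime_factors_sketch(97) == [97]
assert prime_factors_sketch(1) == []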
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
'''simple docstring'''
from functools import reduce
_lowercase : int = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase__ ( A : str = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda A , A : str(int(A ) * int(A ) ) , n[i : i + 13] ) )
for i in range(len(A ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature applied to the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
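# The --temperature and --alpha_ce flags above drive a soft-target distillation
# loss. A minimal sketch of that objective (the standard temperature-scaled KL
# formulation; the exact Distiller internals may differ):
import torch
import torch.nn.functional as F


def distillation_loss_sketch(student_logits, teacher_logits, temperature: float = 2.0):
    # KL divergence between temperature-softened distributions, scaled by T**2
    # so gradient magnitudes stay comparable across temperatures.
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * temperature**2


_loss = distillation_loss_sketch(torch.randn(4, 10), torch.randn(4, 10))
assert _loss.ndim == 0  # scalar loss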
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : int = ["torch", "transformers", "onnx"]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any] )-> Dict:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Dict )-> Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : Optional[int] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int )-> int:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : Dict = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[Any] )-> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : List[str] , *lowerCAmelCase : Any , **lowerCAmelCase : str )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : Tuple = ["torch", "transformers", "onnx"]
def __init__( self : int , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : str )-> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : Optional[Any] , *lowerCAmelCase : str , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[Any] )-> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : Optional[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any] )-> str:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any )-> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Tuple , *lowerCAmelCase : List[Any] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any] )-> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any] )-> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self : Any , *lowerCAmelCase : Any , **lowerCAmelCase : str )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> str:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__( cls : Any , *lowerCAmelCase : int , **lowerCAmelCase : Dict )-> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
'''simple docstring'''
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = int(A )
if decimal in (0, 1): # Exit cases for the recursion
return str(A )
UpperCAmelCase , UpperCAmelCase = divmod(A , 2 )
return binary_recursive(A ) + str(A )
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = str(A ).strip()
if not number:
raise ValueError('''No input value was provided''' )
UpperCAmelCase = '''-''' if number.startswith('''-''' ) else ''''''
UpperCAmelCase = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f"""{negative}0b{binary_recursive(int(A ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
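# Reconstructed sketch of the recursion above with explicit names (the snippet
# keeps this corpus's placeholder identifiers, so this is a hedged rewrite):
def binary_recursive_sketch(decimal: int) -> str:
    if decimal in (0, 1):  # base cases of the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive_sketch(div) + str(mod)


assert binary_recursive_sketch(10) == "1010"
assert binary_recursive_sketch(0) == "0"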
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : List[str] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
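# Both init files above rely on the _LazyModule pattern: the import table is
# declared eagerly, but each submodule is imported only on first attribute
# access. A minimal sketch of the idea using only the standard library (this
# is an illustration, not the transformers implementation):
import importlib


class LazyModuleSketch:
    def __init__(self, name_to_module: dict):
        self._table = name_to_module

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, i.e. on first use.
        module = importlib.import_module(self._table[name])
        return getattr(module, name)


lazy = LazyModuleSketch({"sqrt": "math"})
assert lazy.sqrt(9) == 3.0  # ``math`` is imported here, not at construction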
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=A , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information.''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
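# The script above shrinks the per-step batch when it exceeds MAX_GPU_BATCH_SIZE
# and compensates with gradient accumulation. A minimal sketch of that pattern
# in plain PyTorch (hypothetical model/optimizer/data, not the accelerate API):
import torch


def train_with_accumulation(model, optimizer, batches, accumulation_steps: int):
    for step, (x, y) in enumerate(batches):
        loss = torch.nn.functional.mse_loss(model(x), y)
        # Scale the loss so the accumulated gradient matches one big-batch step.
        (loss / accumulation_steps).backward()
        if step % accumulation_steps == 0:  # mirrors the modulo check above
            optimizer.step()
            optimizer.zero_grad()


_model = torch.nn.Linear(3, 1)
_opt = torch.optim.SGD(_model.parameters(), lr=0.1)
_data = [(torch.randn(2, 3), torch.randn(2, 1)) for _ in range(4)]
train_with_accumulation(_model, _opt, _data, accumulation_steps=2)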
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = 10
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [1, 2, 3, 4]
UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCAmelCase , self.block_size , 0 ) , lowerCAmelCase )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase , self.block_size , 0 ) , lowerCAmelCase )
def a__( self : List[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase , self.block_size , 0 ) , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase , UpperCAmelCase = process_story(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , [] )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = ''''''
UpperCAmelCase , UpperCAmelCase = process_story(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , [] )
self.assertEqual(lowerCAmelCase , [] )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase , UpperCAmelCase = process_story(lowerCAmelCase )
UpperCAmelCase = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = ['''It was the best of times.''']
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def a__( self : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase , 23 ).numpy() , expected.numpy() )
def a__( self : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = 101
UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCAmelCase = compute_token_type_ids(lowerCAmelCase , lowerCAmelCase )
np.testing.assert_array_equal(lowerCAmelCase , lowerCAmelCase )
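# Hedged reconstruction of the behaviour the build_mask tests above check:
# ones everywhere, zeros at positions equal to the padding token (the real
# utils_summarization helper may differ in detail):
import torch


def build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask


assert build_mask_sketch(torch.tensor([8, 2, 3, 4, 1, 1, 1]), 1).tolist() == [1, 1, 1, 1, 0, 0, 0]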
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : int = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
_lowercase : Any = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def lowerCamelCase__ ( A : Dict , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase = int(re.match(R'''.*layer_(\d*).*''' , A )[1] )
layer_number -= 3
return f"""h.{layer_number}.""" + key
def lowerCamelCase__ ( A : Dict ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase = re.search(R'''[^\d](\d+)$''' , str(A ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
UpperCAmelCase = int(bit_search.groups()[0] )
return bit_size // 8
def lowerCamelCase__ ( A : Union[str, Any] , A : str , A : Any , A : List[Any] , A : str ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase = BloomConfig()
else:
UpperCAmelCase = BloomConfig.from_json_file(A )
if shard_model:
UpperCAmelCase = os.listdir(A )
        UpperCAmelCase = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , A ) )
UpperCAmelCase = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = BloomConfig()
for j, file in enumerate(A ):
print('''Processing file: {}'''.format(A ) )
UpperCAmelCase = None
for i in range(A ):
# load all TP files
UpperCAmelCase = file.replace('''model_00''' , f"""model_0{i}""" )
UpperCAmelCase = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase = list(temp.keys() )
for key in keys:
UpperCAmelCase = temp.pop(A )
if tensors is None:
UpperCAmelCase = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase = tensors[key] / pretraining_tp
torch.save(
A , os.path.join(
A , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) )
UpperCAmelCase = BloomConfig()
UpperCAmelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase = total_size
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = json.dumps(A , indent=2 , sort_keys=A ) + '''\n'''
f.write(A )
else:
UpperCAmelCase = BloomModel(A )
UpperCAmelCase = os.listdir(A )
        UpperCAmelCase = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , A ) )
UpperCAmelCase = None
for i, file in enumerate(A ):
UpperCAmelCase = None
for i in range(A ):
# load all TP files
UpperCAmelCase = file.replace('''model_00''' , f"""model_0{i}""" )
UpperCAmelCase = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase = list(temp.keys() )
for key in keys:
UpperCAmelCase = temp.pop(A )
if tensors is None:
UpperCAmelCase = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase = tensors[key] / pretraining_tp
UpperCAmelCase = model.load_state_dict(A , strict=A )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
UpperCAmelCase = set(other_keys.missing_keys )
else:
UpperCAmelCase = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(A , exist_ok=A )
UpperCAmelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
UpperCAmelCase = model.to(config.torch_dtype )
torch.save(model.state_dict() , A )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
_lowercase : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
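# The conversion above merges tensor-parallel (TP) shards in two ways:
# layer-norm/bias tensors are averaged across ranks, while parallel linear
# weights are concatenated along the sharded dimension. Minimal sketch with
# hypothetical two-way TP shards:
import torch


def merge_tp_shards(shards, average: bool, cat_dim: int = 0):
    if average:
        return sum(shards) / len(shards)
    return torch.cat(shards, dim=cat_dim)


_bias = merge_tp_shards([torch.ones(4), torch.ones(4) * 3], average=True)
assert _bias.tolist() == [2.0, 2.0, 2.0, 2.0]
_weight = merge_tp_shards([torch.zeros(2, 4), torch.zeros(2, 4)], average=False)
assert _weight.shape == (4, 4)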
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
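# Worked example for the frame_rate property above with this config's defaults
# (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2]): the hop length is the
# product of the upsampling ratios, 8 * 5 * 4 * 2 = 320 samples, so
# frame_rate = ceil(24000 / 320) = 75 frames per second.
import math

hop_length = 8 * 5 * 4 * 2
assert math.ceil(24_000 / hop_length) == 75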
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : Optional[int] = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
    title_sep (`str`, *optional*, defaults to `" / "`):
        Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
    doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
    dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`)
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`)
        The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
        `"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a config (or a derived class) from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
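# A minimal usage sketch of the composite config above, hedged: it assumes network access to
# the Hugging Face Hub, and the model identifiers below are illustrative choices only.
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="compressed"
    )
    assert rag_config.model_type == "rag"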
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
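# A standalone illustration of the same lazy-import idea (not transformers code): module-level
# __getattr__ (PEP 562) defers the real import until the attribute is first accessed. The
# _LAZY_ATTRS mapping below is a made-up example.
import importlib

_LAZY_ATTRS = {"sqrt": ("math", "sqrt")}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module_name, attr = _LAZY_ATTRS[name]
        return getattr(importlib.import_module(module_name), attr)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")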
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for finding an (approximate) minimum vertex cover."""
    queue: list = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build a max priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)

    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
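# Standalone mini-demo of the max-heap-via-negation trick used in greedy_min_vertex_cover above:
# pushing -rank makes Python's min-heap pop the largest rank first.
import heapq

ranks = [3, 1, 4, 1, 5]
max_heap = [-r for r in ranks]
heapq.heapify(max_heap)
assert -heapq.heappop(max_heap) == 5  # the largest rank comes out first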
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replace pattern registered for it."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is registered (init, setup and, unless a patch, examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings, e.g. ['1', '1/2', '1/3']."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
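# Hedged companion sketch: a numeric partial sum H_n = 1 + 1/2 + ... + 1/n of the same series
# (the function above returns the terms as strings instead of summing them).
def harmonic_partial_sum(n: int) -> float:
    return sum(1.0 / k for k in range(1, n + 1))


assert abs(harmonic_partial_sum(4) - (1 + 0.5 + 1 / 3 + 0.25)) < 1e-12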
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
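# A minimal usage sketch of the criteria exercised in the tests above, assuming a model with
# the standard `generate` API; the `model.generate` call is left as a comment because no model
# is constructed here.
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# outputs = model.generate(input_ids, stopping_criteria=criteria)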
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the value of each Bernstein basis polynomial at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the point on the curve at time t (the basis-weighted sum of control points)."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve together with its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
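# Hedged spot-check (standalone): for a quadratic curve, B(0.5) = 0.25*P0 + 0.5*P1 + 0.25*P2,
# so the degree-2 example above evaluates to (3.75, 2.5) at t = 0.5.
curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
x, y = curve.bezier_curve_function(0.5)
assert (round(x, 5), round(y, 5)) == (3.75, 2.5)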
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the turn-around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process: 0 means not yet executed, 1 means executed.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process (in arrival order) that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the waiting time of each process: waiting = turn-around - burst."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process)
    waiting_time = calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process)

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
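# Hedged mini-example of the HRRN priority used above:
# response_ratio = (burst + waiting) / burst, with waiting = current_time - arrival.
def response_ratio(burst: int, arrival: int, current_time: int) -> float:
    return (burst + (current_time - arrival)) / burst


assert response_ratio(burst=3, arrival=1, current_time=4) == 2.0  # waited 3 of 3 units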
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
'''simple docstring'''
import socket
def main() -> None:
    """Connect to a local server, send a greeting and save the streamed response to a file."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
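# A hedged sketch of the matching sender side (an assumption: the original server code is not
# shown here). It binds to the same port (12312), consumes the client's greeting, then streams
# a file in 1024-byte chunks.
def serve_file(path: str, port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.bind((socket.gethostname(), port))
        server.listen(1)
        conn, _addr = server.accept()
        with conn, open(path, "rb") as in_file:
            conn.recv(1024)  # consume the client's "Hello server!" greeting
            while chunk := in_file.read(1024):
                conn.sendall(chunk)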
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
_lowercase : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
_lowercase : Union[str, Any] = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = CamembertTokenizer
__magic_name__ : Dict = CamembertTokenizerFast
__magic_name__ : int = True
__magic_name__ : Optional[int] = True
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = CamembertTokenizer(lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCAmelCase ) , 1004 )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = CamembertTokenizer(lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowerCAmelCase , )
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__( lowerCAmelCase ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
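# A standalone toy illustration of the JIT-vs-eager equivalence check used in the test above;
# the function below is a made-up example, not part of the model under test.
import jax
import jax.numpy as jnp


@jax.jit
def scale(x):
    return 2.0 * x


x = jnp.ones((2, 3))
with jax.disable_jit():
    eager_out = scale(x)  # runs eagerly
assert jnp.allclose(scale(x), eager_out)  # jitted and eager results agree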
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number (e.g. 28)."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
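# Hedged companion sketch: the inverse mapping (column number back to title), useful for
# round-trip checks; `column_number_to_title` is a name introduced here, not from the original.
def column_number_to_title(n: int) -> str:
    title = ""
    while n > 0:
        n, rem = divmod(n - 1, 26)
        title = chr(65 + rem) + title
    return title


assert column_number_to_title(excel_title_to_column("AZ")) == "AZ"  # 52 round-trips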
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums (recursive rotate-and-recurse version)."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of nums (in-place swap backtracking version)."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
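# Hedged cross-check against the standard library: both functions above should agree with
# itertools.permutations up to ordering.
from itertools import permutations as std_permutations

expected = sorted(map(list, std_permutations([1, 2, 3])))
assert sorted(permute([1, 2, 3])) == expected
assert sorted(permute2([1, 2, 3])) == expected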
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : str = StableDiffusionControlNetImgaImgPipeline
__magic_name__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__magic_name__ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__magic_name__ : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__( self : Any )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a__( self : Any , lowerCAmelCase : int , lowerCAmelCase : List[str]=0 )-> Any:
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase = 2
UpperCAmelCase = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
UpperCAmelCase = floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def a__( self : List[str] )-> int:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__( self : Dict )-> str:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Tuple = StableDiffusionControlNetImgaImgPipeline
__magic_name__ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__magic_name__ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ : Any = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowerCAmelCase : Tuple ):
if isinstance(lowerCAmelCase , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
UpperCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
torch.manual_seed(0 )
UpperCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = MultiControlNetModel([controlneta, controlneta] )
UpperCAmelCase = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any]=0 )-> List[str]:
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase = 2
UpperCAmelCase = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
]
UpperCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
UpperCAmelCase = 10.0
UpperCAmelCase = 4
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase = steps
UpperCAmelCase = scale
UpperCAmelCase = pipe(**lowerCAmelCase )[0]
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase = steps
UpperCAmelCase = scale
UpperCAmelCase = pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase = steps
UpperCAmelCase = scale
UpperCAmelCase = pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase = steps
UpperCAmelCase = scale
UpperCAmelCase = pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def a__( self : str )-> List[str]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCAmelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase = '''evil space-punk bird'''
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
UpperCAmelCase = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
UpperCAmelCase = pipe(
lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
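# Illustration of the id layout built in __init__ above (a sketch with toy
# sizes; only the 52-entry language-code count comes from this file): spm
# piece i maps to fairseq id i + fairseq_offset, the language codes sit
# directly after the spm vocab, and <mask> takes the final id.
_offset, _spm_size, _num_lang_codes = 1, 10, 52
assert _spm_size + 0 + _offset == 11  # id of the first language code
assert _spm_size + _num_lang_codes + _offset == 63  # id of <mask>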
'''simple docstring'''
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCAmelCase = ''''''
UpperCAmelCase = ''''''
    # append each character + "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(A ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
UpperCAmelCase , UpperCAmelCase = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCAmelCase = [1 for i in range(len(A ) )]
    # for each character in new_input_string find the corresponding palindromic string
UpperCAmelCase = 0
for j in range(len(A ) ):
UpperCAmelCase = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(A )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCAmelCase = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to the bounds of this palindrome
if j + k - 1 > r:
UpperCAmelCase = j - k + 1 # noqa: E741
UpperCAmelCase = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCAmelCase = length[j]
UpperCAmelCase = j
# create that string
UpperCAmelCase = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
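    # Cross-check sketch (hypothetical helper name, not part of the original):
    # a brute-force longest palindromic substring to validate the Manacher
    # result above on small inputs.
    def brute_force_longest_palindrome(s: str) -> str:
        best = ""
        for i in range(len(s)):
            for j in range(i, len(s)):
                candidate = s[i : j + 1]
                if candidate == candidate[::-1] and len(candidate) > len(best):
                    best = candidate
        return best

    assert brute_force_longest_palindrome("abbbaba") == "abbba"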
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
@property
def a__( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.dummy_uncond_unet
UpperCAmelCase = PNDMScheduler()
UpperCAmelCase = PNDMPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pndm.to(lowerCAmelCase )
pndm.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pndm(generator=lowerCAmelCase , num_inference_steps=20 , output_type='''numpy''' ).images
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pndm(generator=lowerCAmelCase , num_inference_steps=20 , output_type='''numpy''' , return_dict=lowerCAmelCase )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''google/ddpm-cifar10-32'''
UpperCAmelCase = UNetaDModel.from_pretrained(lowerCAmelCase )
UpperCAmelCase = PNDMScheduler()
UpperCAmelCase = PNDMPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pndm.to(lowerCAmelCase )
pndm.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pndm(generator=lowerCAmelCase , output_type='''numpy''' ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
import functools
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase = set(A )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
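    # Self-contained sketch of the same recurrence with descriptive names
    # (hypothetical helper; the classic case days=[1, 4, 6, 7, 8, 20] with
    # costs=[2, 7, 15] has minimum cost 11: a 1-day, a 7-day and a 1-day pass).
    def min_ticket_cost(days: list[int], costs: list[int]) -> int:
        day_set = set(days)

        @functools.cache
        def dp(day: int) -> int:
            if day > 365:
                return 0
            if day not in day_set:
                return dp(day + 1)  # no travel today, move on for free
            return min(
                costs[0] + dp(day + 1),  # buy a 1-day pass
                costs[1] + dp(day + 7),  # buy a 7-day pass
                costs[2] + dp(day + 30),  # buy a 30-day pass
            )

        return dp(1)

    assert min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11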
'''simple docstring'''
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
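    # Self-contained sketch (hypothetical helper name): the XOR of two ints is
    # negative exactly when their sign bits differ, which is what the function
    # above tests.
    def different_signs(num_a: int, num_b: int) -> bool:
        return (num_a ^ num_b) < 0

    assert different_signs(1, -1)
    assert not different_signs(-3, -7)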
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from math import factorial
def lowerCamelCase__ ( A : int = 20 ):
'''simple docstring'''
UpperCAmelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
UpperCAmelCase = n // 2
return int(factorial(A ) / (factorial(A ) * factorial(n - k )) )
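# Equivalent closed form as a sketch (hypothetical helper name): the middle
# entry of row 2n of Pascal's triangle is the central binomial coefficient
# C(2n, n) = (2n)! / (n!)^2; for n = 20 this gives 137846528820.
def central_binomial(n: int = 20) -> int:
    return factorial(2 * n) // (factorial(n) ** 2)

assert central_binomial(20) == 137846528820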
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
_lowercase : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Any , A : int=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item
UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
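# Illustration of the `[:, :, 0]` slice above (a sketch with toy shapes): a
# kernel-size-1 Conv1d weight of shape (out, in, 1) becomes the equivalent
# Linear weight of shape (out, in) by dropping the trailing kernel dimension.
_conv_weight_demo = torch.randn(4, 4, 1)
assert _conv_weight_demo[:, :, 0].shape == (4, 4)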
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
UpperCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
UpperCAmelCase = {'''unk_token''': '''<unk>'''}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase ) )
UpperCAmelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase = os.path.join(self.tmpdirname , lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase , lowerCAmelCase )
def a__( self : Optional[int] , **lowerCAmelCase : Optional[int] )-> int:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def a__( self : str , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def a__( self : List[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase )
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase )
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
UpperCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = processor(images=lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__( self : int )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = processor(text=lowerCAmelCase )
UpperCAmelCase = tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def a__( self : int )-> int:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase = processor.batch_decode(lowerCAmelCase )
UpperCAmelCase = tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = CLIPProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Dict )-> Dict:
"""simple docstring"""
self.test()
def a__( self : Tuple )-> str:
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = False
while not completed:
if counter == 1:
self.reset()
UpperCAmelCase = self.advance()
if not self.does_advance(lowerCAmelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.update(lowerCAmelCase )
counter += 1
if counter > 10000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def a__( self : str )-> Any:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a__( self : Optional[int] , lowerCAmelCase : int )-> int:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a__( self : Optional[Any] , lowerCAmelCase : int )-> Tuple:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a__( self : List[Any] )-> Any:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a__( self : List[str] )-> Dict:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a__( self : int , lowerCAmelCase : Tuple=False )-> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Optional[int] , lowerCAmelCase : List[int] )-> List[str]:
"""simple docstring"""
super(lowerCAmelCase , self ).__init__()
if not isinstance(lowerCAmelCase , lowerCAmelCase ) or len(lowerCAmelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(lowerCAmelCase , lowerCAmelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
UpperCAmelCase = token_ids
UpperCAmelCase = len(self.token_ids )
UpperCAmelCase = -1 # the index of the currently fulfilled step
UpperCAmelCase = False
def a__( self : Optional[Any] )-> Dict:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def a__( self : List[Any] , lowerCAmelCase : int )-> List[str]:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def a__( self : Optional[int] , lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase )}""" )
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
if self.does_advance(lowerCAmelCase ):
self.fulfilled_idx += 1
UpperCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCAmelCase = True
UpperCAmelCase = completed
else:
# failed to make progress.
UpperCAmelCase = True
self.reset()
return stepped, completed, reset
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = False
UpperCAmelCase = 0
def a__( self : int )-> List[str]:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def a__( self : Dict , lowerCAmelCase : str=False )-> int:
"""simple docstring"""
UpperCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
UpperCAmelCase = self.seqlen
UpperCAmelCase = self.fulfilled_idx
UpperCAmelCase = self.completed
return new_constraint
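# Usage sketch (hypothetical token ids), tracing the update() logic above:
# a phrasal constraint over [5, 9] must see 5 then 9, in order.
#   c = PhrasalConstraint([5, 9])
#   c.update(5) -> stepped=True,  completed=False, reset=False
#   c.update(9) -> stepped=True,  completed=True,  reset=False
#   c.update(7) on a fresh constraint -> stepped=False, completed=False, reset=True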
class UpperCamelCase__:
def __init__( self : Optional[Any] , lowerCAmelCase : List[List[int]] , lowerCAmelCase : Dict=True )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = max([len(lowerCAmelCase ) for one in nested_token_ids] )
UpperCAmelCase = {}
for token_ids in nested_token_ids:
UpperCAmelCase = root
for tidx, token_id in enumerate(lowerCAmelCase ):
if token_id not in level:
UpperCAmelCase = {}
UpperCAmelCase = level[token_id]
if no_subsets and self.has_subsets(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
UpperCAmelCase = root
def a__( self : Dict , lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.trie
for current_token in current_seq:
UpperCAmelCase = start[current_token]
UpperCAmelCase = list(start.keys() )
return next_tokens
def a__( self : int , lowerCAmelCase : Dict )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.next_tokens(lowerCAmelCase )
return len(lowerCAmelCase ) == 0
def a__( self : Dict , lowerCAmelCase : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = list(root.values() )
if len(lowerCAmelCase ) == 0:
return 1
else:
return sum([self.count_leaves(lowerCAmelCase ) for nn in next_nodes] )
def a__( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.count_leaves(lowerCAmelCase )
return len(lowerCAmelCase ) != leaf_count
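# Self-contained sketch of the trie structure above (hypothetical helper, not
# part of the original): nested dicts keyed by token id, so the children of a
# prefix are exactly the allowed next tokens.
def _build_trie_demo(sequences):
    root = {}
    for seq in sequences:
        level = root
        for token in seq:
            level = level.setdefault(token, {})
    return root

assert sorted(_build_trie_demo([[1, 2, 3], [1, 2, 4]])[1][2]) == [3, 4]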
class DisjunctiveConstraint(Constraint ):
def __init__(self , nested_token_ids : List[List[int]] ):
"""simple docstring"""
super(DisjunctiveConstraint , self ).__init__()
if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
self.trie = DisjunctiveTrie(nested_token_ids )
self.token_ids = nested_token_ids
self.seqlen = self.trie.max_height
self.current_seq = []
self.completed = False
def advance(self ):
"""simple docstring"""
token_list = self.trie.next_tokens(self.current_seq )
if len(token_list ) == 0:
return None
else:
return token_list
def does_advance(self , token_id : int ):
"""simple docstring"""
if not isinstance(token_id , int ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
next_tokens = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def update(self , token_id : int ):
"""simple docstring"""
if not isinstance(token_id , int ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
stepped = False
completed = False
reset = False
if self.does_advance(token_id ):
self.current_seq.append(token_id )
stepped = True
else:
reset = True
self.reset()
completed = self.trie.reached_leaf(self.current_seq )
self.completed = completed
return stepped, completed, reset
def reset(self ):
"""simple docstring"""
self.completed = False
self.current_seq = []
def remaining(self ):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def copy(self , stateful : bool=False ):
"""simple docstring"""
new_constraint = DisjunctiveConstraint(self.token_ids )
if stateful:
new_constraint.seqlen = self.seqlen
new_constraint.current_seq = self.current_seq
new_constraint.completed = self.completed
return new_constraint
class ConstraintListState:
def __init__(self , constraints : List[Constraint] ):
"""simple docstring"""
self.constraints = constraints
# max # of steps required to fulfill a given constraint
self.max_seqlen = max([c.seqlen for c in constraints] )
self.n_constraints = len(constraints )
self.completed = False
self.init_state()
def init_state(self ):
"""simple docstring"""
self.complete_constraints = []
self.inprogress_constraint = None
self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
def get_bank(self ):
"""simple docstring"""
add = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def advance(self ):
"""simple docstring"""
token_list = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
advance = constraint.advance()
if isinstance(advance , int ):
token_list.append(advance )
elif isinstance(advance , list ):
token_list.extend(advance )
else:
advance = self.inprogress_constraint.advance()
if isinstance(advance , int ):
token_list.append(advance )
elif isinstance(advance , list ):
token_list.extend(advance )
if len(token_list ) == 0:
return None
else:
return token_list
def reset(self , token_ids : Optional[List[int]] ):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
complete , stepped = self.add(token )
# the entire list of constraints is fulfilled
if self.completed:
break
def add(self , token_id : int ):
"""simple docstring"""
if not isinstance(token_id , int ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
complete , stepped = False, False
if self.completed:
complete = True
stepped = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state.
stepped , complete , reset = self.inprogress_constraint.update(token_id )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
self.inprogress_constraint = None
if complete:
# 2. If the next token completes the constraint, move it to the completed list and set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
self.inprogress_constraint = None
if len(self.pending_constraints ) == 0:
# we're done!
self.completed = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any
# constraint in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(token_id ):
stepped , complete , reset = pending_constraint.update(token_id )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(pending_constraint )
self.inprogress_constraint = None
if not complete and stepped:
self.inprogress_constraint = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
self.pending_constraints = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
self.completed = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def copy(self , stateful : bool=True ):
"""simple docstring"""
new_state = ConstraintListState(self.constraints ) # we never actually modify the self.constraints objects
# throughout this process, so they stay at their initialization state.
if stateful:
new_state.complete_constraints = [
constraint.copy(stateful=True ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
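# A minimal usage sketch (added for illustration; not part of the original module).
# It drives the state machine above directly: the token ids are arbitrary
# placeholders, and the DisjunctiveConstraint accepts either of two phrases.
if __name__ == "__main__":
    demo_constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
    for demo_token in [1, 4]:
        stepped, completed, reset = demo_constraint.update(demo_token)
        print(f"token={demo_token} stepped={stepped} completed={completed} reset={reset}")
    # the shorter branch [1, 4] reaches a trie leaf, so the constraint completes
    assert demo_constraint.completed and demo_constraint.current_seq == [1, 4]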
| 50
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student : Any , args : str ):
'''simple docstring'''
if args.student_type == "roberta":
student.roberta.embeddings.position_embeddings.weight.requires_grad = False
elif args.student_type == "gpt2":
student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student : List[Any] , args : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
parser = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=float , help='''Temperature for the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
args = parser.parse_args()
sanity_checks(args )
# ARGS #
init_gpu_params(args )
set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"""
''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
special_tok_ids = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
idx = tokenizer.all_special_tokens.index(tok_symbol )
special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
args.special_tok_ids = special_tok_ids
args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
data = pickle.load(fp )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
counts = pickle.load(fp )
token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
token_probs[idx] = 0.0 # do not predict special tokens
token_probs = torch.from_numpy(token_probs )
else:
token_probs = None
train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
stu_architecture_config = student_config_class.from_pretrained(args.student_config )
stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
else:
student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(student , args )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
distiller = Distiller(
params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
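# Illustrative sketch (added; not part of the original script): the core
# temperature-scaled distillation term that Distiller optimizes, written out in
# plain PyTorch. `temperature` mirrors the `--temperature` flag above; the
# function name and tensor shapes are hypothetical.
import torch.nn.functional as F

def soft_distillation_loss(student_logits, teacher_logits, temperature=2.0):
    # KL divergence between temperature-softened distributions, scaled by T^2
    # so gradient magnitudes stay comparable across temperatures.
    soft_student = F.log_softmax(student_logits / temperature, dim=-1)
    soft_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(soft_student, soft_teacher, reduction="batchmean") * (temperature**2)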
| 50
| 1
|
'''simple docstring'''
def hamming_distance( string1 : str , string2 : str ):
'''simple docstring'''
if len(string1 ) != len(string2 ):
raise ValueError('''String lengths must match!''' )
count = 0
for char1, char2 in zip(string1 , string2 ):
if char1 != char2:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
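# Quick usage check (added for illustration): the function counts the positions
# at which two equal-length strings differ.
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("0000", "1111") == 4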
| 50
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor( ProcessorMixin ):
attributes = ["image_processor", "tokenizer"]
image_processor_class = "ViTImageProcessor"
tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , image_processor : List[Any]=None , tokenizer : List[str]=None , **kwargs : Optional[int] )-> List[Any]:
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , FutureWarning , )
feature_extractor = kwargs.pop('''feature_extractor''' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(image_processor , tokenizer )
def __call__( self : str , text : Optional[Any]=None , visual_prompt : Optional[Any]=None , images : Dict=None , return_tensors : str=None , **kwargs : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
if visual_prompt is not None:
prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
if visual_prompt is not None and images is not None:
encoding = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
encoding['''pixel_values'''] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
encoding = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def batch_decode( self : List[str] , *args : str , **kwargs : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self : Dict , *args : Tuple , **kwargs : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@property
def feature_extractor_class( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
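# Usage sketch (added for illustration; not part of the original file). The
# checkpoint name is assumed to be the public CLIPSeg one, and the blank PIL
# image is a stand-in for real data.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.new("RGB", (352, 352))
    # text + image -> input_ids / attention_mask plus pixel_values
    inputs = processor(text=["a cat"], images=image, return_tensors="pt")
    # visual prompt + image -> pixel_values plus conditional_pixel_values
    inputs = processor(visual_prompt=image, images=image, return_tensors="pt")
    print(sorted(inputs.keys()))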
| 50
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
@slow
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
input_ids = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
output = model(input_ids )['''last_hidden_state''']
expected_shape = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 50
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_maskformer"""] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_import_structure["""modeling_maskformer_swin"""] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : int = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = "codegen"
__magic_name__ : Any = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , lowerCAmelCase : Optional[int]=50400 , lowerCAmelCase : Tuple=2048 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : Optional[int]=4096 , lowerCAmelCase : str=28 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Dict=None , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=1E-5 , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Union[str, Any]=50256 , lowerCAmelCase : str=50256 , lowerCAmelCase : List[Any]=False , **lowerCAmelCase : Union[str, Any] , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = n_ctx
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = n_inner
UpperCAmelCase = rotary_dim
UpperCAmelCase = activation_function
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = use_cache
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Union[str, Any] , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : str = "default" , lowerCAmelCase : List[PatchingSpec] = None , lowerCAmelCase : bool = False , )-> Optional[Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase )
if not getattr(self._config , '''pad_token_id''' , lowerCAmelCase ):
# TODO: how to do that better?
UpperCAmelCase = 0
@property
def a__( self : Any )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='''inputs''' )
UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return self._config.n_layer
@property
def a__( self : int )-> int:
"""simple docstring"""
return self._config.n_head
def a__( self : Dict , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase = super(lowerCAmelCase , self ).generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase = seqlen + 2
UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase = [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def a__( self : Dict )-> int:
"""simple docstring"""
return 13
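# Illustrative sketch (added; not part of the original file): the dummy
# past_key_values built above are one (key, value) pair per decoder layer,
# each of shape (batch, num_heads, past_seq_len, hidden_size // num_heads).
# The sizes below are hypothetical.
import torch

batch, num_heads, past_seq_len, head_dim = 2, 16, 10, 4096 // 16
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_seq_len, head_dim),
        torch.zeros(batch, num_heads, past_seq_len, head_dim),
    )
    for _ in range(28)  # one pair per layer
]
assert past_key_values[0][0].shape == (batch, num_heads, past_seq_len, head_dim)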
| 50
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(examples : int ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(examples : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config : Optional[Any] , args : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
config['''num_epochs'''] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
'''simple docstring'''
parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=A , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
args = parser.parse_args()
config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(config , args )
if __name__ == "__main__":
main()
| 50
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Optional[Any] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url : int ):
'''simple docstring'''
config = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
config.hidden_size = 10_24
config.intermediate_size = 40_96
config.num_hidden_layers = 24
config.num_attention_heads = 16
config.backbone_out_indices = [5, 11, 17, 23]
config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
expected_shape = (1, 3_84, 3_84)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase = 7_68
UpperCAmelCase = [1, 1, 1, 0.5]
UpperCAmelCase = [2_56, 5_12, 7_68, 7_68]
UpperCAmelCase = 1_50
UpperCAmelCase = 16
UpperCAmelCase = (1, 3_84, 3_84)
UpperCAmelCase = False
UpperCAmelCase = '''project'''
if "ade" in checkpoint_url:
config.use_batch_norm_in_fusion_residual = True
config.hidden_size = 7_68
config.reassemble_factors = [1, 1, 1, 0.5]
config.num_labels = 1_50
config.patch_size = 16
repo_id = '''huggingface/label-files'''
filename = '''ade20k-id2label.json'''
id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
expected_shape = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def remove_ignore_keys_( state_dict : Union[str, Any] ):
'''simple docstring'''
ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key( name : List[Any] ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
name = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
name = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
name = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
name = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
name = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
name = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
name = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
name = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
name = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
name = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
name = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
name = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
name = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
name = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
name = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
name = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
name = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
name = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
name = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
name = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
name = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
name = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
name = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
name = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
name = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
name = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
name = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
name = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
name = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def read_in_q_k_v( state_dict : Any , config : Union[str, Any] ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
'''simple docstring'''
url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url : Dict , pytorch_dump_folder_path : List[str] , push_to_hub : Any , model_name : Dict , show_prediction : Optional[int] ):
'''simple docstring'''
config , expected_shape = get_dpt_config(checkpoint_url )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
state_dict = torch.load(checkpoint_url , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(state_dict )
# rename keys
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val
# read in qkv matrices
read_in_q_k_v(state_dict , config )
# load HuggingFace model
model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
model.load_state_dict(state_dict )
model.eval()
# Check outputs on an image
size = 4_80 if '''ade''' in checkpoint_url else 3_84
image_processor = DPTImageProcessor(size=size )
image = prepare_img()
encoding = image_processor(image , return_tensors='''pt''' )
# forward pass
outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
if show_prediction:
prediction = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=False , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
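# Illustrative sketch (added; not part of the original script) of the qkv split
# performed in read_in_q_k_v above: a fused (3*hidden, hidden) projection is
# sliced into query/key/value blocks. The size is hypothetical.
hidden = 4
fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = fused[:hidden, :]
k = fused[hidden : hidden * 2, :]
v = fused[-hidden:, :]
assert torch.equal(torch.cat([q, k, v]), fused)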
| 50
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_xmod"""] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 1
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model : Any , pytorch_checkpoint_path : Dict , is_sharded : str , allow_missing_keys : Optional[Any]=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
pt_path = os.path.abspath(pytorch_checkpoint_path )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
pt_state_dict = torch.load(pt_path , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key : Tuple[str] , pt_tensor : np.ndarray , random_flax_state_dict : Dict[str, jnp.ndarray] , model_prefix : str , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# embedding
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
pt_tensor = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
name = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
name = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
name = pt_tuple_key[-2] + '''_v'''
if name is not None:
renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
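# Quick usage sketch (added for illustration): a 2D PyTorch linear weight is
# transposed into a Flax `kernel` when the untransposed key is absent from the
# random Flax state dict. All names and shapes below are hypothetical.
demo_key, demo_tensor = rename_key_and_reshape_tensor(
    ("dense", "weight"), np.ones((4, 3)), {("dense", "kernel"): jnp.ones((3, 4))}, "model"
)
assert demo_key == ("dense", "kernel") and demo_tensor.shape == (3, 4)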
def convert_pytorch_state_dict_to_flax( pt_state_dict : int , flax_model : int ):
'''simple docstring'''
pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
model_prefix = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
flax_model_params = flax_model.params['''params''']
else:
flax_model_params = flax_model.params
random_flax_state_dict = flatten_dict(flax_model_params )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
flax_batch_stats = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(flax_batch_stats )
flax_state_dict = {}
load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
pt_tuple_key = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
has_base_model_prefix = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
pt_tuple_key = pt_tuple_key[1:]
# Correctly rename weight parameters
flax_key , flax_tensor = rename_key_and_reshape_tensor(
pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
# add model prefix if necessary
require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
flax_key = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(flax_key , None )
continue
# also add unexpected weight so that warning is thrown
flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor )
else:
# also add unexpected weight so that warning is thrown
flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (a list of .pt shard files) into a Flax parameter dict."""
    import torch

    # the converted Flax state dict is accumulated here across all shards
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load each shard with torch.load
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use the params dict if the model contains batch norm layers, then add the batch_stats keys/values to it
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameter names to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )

            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

                # add batch stats if the model contains batchnorm layers
                if "batch_stats" in flax_model.params:
                    if "mean" in flax_key[-1]:
                        flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                        continue
                    if "var" in flax_key[-1]:
                        flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                        continue
                    # remove num_batches_tracked key
                    if "num_batches_tracked" in flax_key[-1]:
                        flax_state_dict.pop(flax_key, None)
                        continue
                    # also add the unexpected weight so that a warning is raised
                    flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
                else:
                    # also add the unexpected weight so that a warning is raised
                    flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load Flax checkpoint weights from `flax_checkpoint_path` into the PyTorch `model`."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import the matching Flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax model weights into the PyTorch `pt_model`."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PyTorch yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by the PyTorch model, so record it to raise a warning below
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
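if __name__ == "__main__":
    # Hedged, self-contained sketch of the flatten/unflatten round trip the
    # converters above rely on (toy nested dict, not a real model checkpoint).
    from flax.traverse_util import flatten_dict, unflatten_dict

    params = {"encoder": {"layer_0": {"kernel": [1.0, 2.0], "bias": [0.0]}}}
    flat = flatten_dict(params)
    # flattened keys are tuples such as ("encoder", "layer_0", "kernel")
    assert ("encoder", "layer_0", "kernel") in flat
    assert unflatten_dict(flat) == params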
| 50
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
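if __name__ == "__main__":
    # Hedged sketch of the frame-rate arithmetic in the properties above,
    # using the 24 kHz defaults as assumed values instead of a real config.
    sampling_rate = 24000
    upsampling_ratios = [8, 5, 4, 2]
    hop_length = int(np.prod(upsampling_ratios))  # 320 samples per frame
    frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
    print(hop_length, frame_rate)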
| 50
| 1
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase : int = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any] )-> str:
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def a__( self : Any , lowerCAmelCase : Any=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = {}
if prompt is not None:
UpperCAmelCase = prompt
if generate_kwargs is not None:
UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Union[str, Any] , lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase : Optional[Any] )-> Dict:
"""simple docstring"""
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def a__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=None )-> int:
"""simple docstring"""
UpperCAmelCase = load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. """
                    '''Note that only a single text prompt can be provided for conditional image-to-text generation.''' )
UpperCAmelCase = self.model.config.model_type
if model_type == "git":
UpperCAmelCase = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase = self.tokenizer(text=lowerCAmelCase , add_special_tokens=lowerCAmelCase ).input_ids
UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase = torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase = self.image_processor(images=lowerCAmelCase , header_text=lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
UpperCAmelCase = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase = None
return model_inputs
def a__( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : str=None )-> Union[str, Any]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
UpperCAmelCase = None
if generate_kwargs is None:
UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase = self.model.generate(lowerCAmelCase , **lowerCAmelCase , **lowerCAmelCase )
return model_outputs
def a__( self : Dict , lowerCAmelCase : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = []
for output_ids in model_outputs:
UpperCAmelCase = {
'''generated_text''': self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
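if __name__ == "__main__":
    # Hedged usage sketch: `pipeline("image-to-text")` is the public entry
    # point for the class above. The checkpoint name and image URL below are
    # assumptions for illustration only.
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
    print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
    # -> [{"generated_text": "..."}]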
| 50
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
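if __name__ == "__main__":
    # Hedged, self-contained sketch of the deferred-import idea behind
    # `_LazyModule`: the real module is only imported when an attribute is
    # first accessed. `LazyModule` here is a toy stand-in, not the real class.
    import importlib

    class LazyModule:
        def __init__(self, name):
            self._name = name
            self._module = None

        def __getattr__(self, attr):
            # __getattr__ only fires for attributes not set in __init__,
            # so the import happens exactly once, on first real access
            if self._module is None:
                self._module = importlib.import_module(self._name)
            return getattr(self._module, attr)

    lazy_json = LazyModule("json")
    print(lazy_json.dumps({"ok": True}))  # triggers the actual import here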
| 50
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[str] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowercase : str = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
_lowercase : Dict = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
_lowercase : Tuple = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[Any] = VOCAB_FILES_NAMES
__magic_name__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
__magic_name__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[Any] = SqueezeBertTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : str="[UNK]" , lowerCAmelCase : Optional[int]="[SEP]" , lowerCAmelCase : int="[PAD]" , lowerCAmelCase : Union[str, Any]="[CLS]" , lowerCAmelCase : Union[str, Any]="[MASK]" , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Union[str, Any] , )-> List[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(lowerCAmelCase , normalizer_state.pop('''type''' ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**lowerCAmelCase )
UpperCAmelCase = do_lower_case
def a__( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any]=None )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
UpperCAmelCase = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
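if __name__ == "__main__":
    # Hedged sketch of the segment-id arithmetic in the token-type-ids method
    # above, with toy token ids standing in for real vocabulary entries.
    cls, sep = [101], [102]
    token_ids_a = [7, 8, 9]
    token_ids_b = [10, 11]
    token_type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]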
| 50
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation of minimum vertex cover: repeatedly pick the vertex covering the most remaining edges."""
    queue = []
    # for each node, push its rank (negated degree) and adjacency list onto the queue;
    # heapq implements a min-priority queue, so -1 * len(value) turns it into a max-priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still uncovered edges
    # (queue[0][0] is the negated rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # remove all edges adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # restore the heap invariant
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 1
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Dict = (EulerDiscreteScheduler,)
__magic_name__ : str = 10
def a__( self : List[str] , **lowerCAmelCase : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCAmelCase )
return config
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def a__( self : Dict )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(lowerCAmelCase )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase , use_karras_sigmas=lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(lowerCAmelCase )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
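if __name__ == "__main__":
    # Hedged sketch of the Karras et al. (2022) sigma schedule exercised by
    # the `use_karras_sigmas` test above; sigma_min/sigma_max/rho are assumed
    # values, not read from the scheduler's internals.
    import numpy as np

    sigma_min, sigma_max, rho, num_steps = 0.1, 10.0, 7.0, 10
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    print(sigmas)  # decreases monotonically from sigma_max to sigma_min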
| 50
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def update_version_in_file(fname: str, version: str, pattern: str):
    """Update the version in `fname` using the regex/replacement registered for `pattern`."""
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def update_version_in_examples(version: str):
    """Update the version pinned in every example script."""
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def global_version_update(version: str, patch: bool = False):
    """Update the version everywhere (init, setup, docs, and the examples unless this is a patch release)."""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def clean_main_ref_in_model_list():
    """Replace links to main-branch doc pages with stable ones in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def get_version():
    """Read the current version from the main __init__."""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
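# Hedged sketch of the `packaging.version` behavior the helpers above rely on:
#
#     from packaging.version import parse
#     v = parse("4.31.0.dev0")
#     v.is_devrelease                     # True
#     v.base_version                      # "4.31.0"
#     f"{v.major}.{v.minor + 1}.0.dev0"   # "4.32.0.dev0"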
| 50
| 1
|
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
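    # Worked examples (expected outputs derived from the implementation above):
    assert snake_to_camel_case("hello_world") == "helloWorld"
    assert snake_to_camel_case("hello_world", use_pascal=True) == "HelloWorld"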
| 50
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
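if __name__ == "__main__":
    # Hedged sketch of evaluating a stopping criterion outside the test
    # harness; tensor shapes mirror `_get_tensors` above. Whether the call
    # returns a plain bool or a tensor depends on the transformers version.
    import torch
    from transformers.generation import MaxLengthCriteria

    input_ids = torch.zeros((3, 9), dtype=torch.long)
    scores = torch.ones((3, 9))
    criteria = MaxLengthCriteria(max_length=10)
    print(criteria(input_ids, scores))  # falsy: sequence length 9 < 10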
| 50
| 1
|
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowercase : Optional[int] = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
_lowercase : Optional[Any] = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
_lowercase : Tuple = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowercase : List[Any] = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowercase : Tuple = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k: str, patterns: list) -> str:
    """Apply each (tf_name, hf_name) replacement in order to a TF variable name."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
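# Worked example (derived from the pattern lists above): applying
# DECODER_PATTERNS to the TF name
#     "pegasus/decoder/layer_0/attention/self/query/kernel"
# yields
#     "model.decoder.layers.0.self_attn.q_proj.weight"
# with each (tf_name, hf_name) replacement applied in list order.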
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a dict of TF weights onto a freshly built BigBirdPegasusForConditionalGeneration."""
UpperCAmelCase = BigBirdPegasusConfig(**A )
UpperCAmelCase = BigBirdPegasusForConditionalGeneration(A )
UpperCAmelCase = torch_model.state_dict()
UpperCAmelCase = {}
# separating decoder weights
UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
UpperCAmelCase = [k.endswith(A ) for ending in KEYS_TO_IGNORE]
if any(A ):
continue
UpperCAmelCase = DECODER_PATTERNS
UpperCAmelCase = rename_state_dict_key(A , A )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCAmelCase = v.T
UpperCAmelCase = torch.from_numpy(A )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
UpperCAmelCase = [k.endswith(A ) for ending in KEYS_TO_IGNORE]
if any(A ):
continue
UpperCAmelCase = REMAINING_PATTERNS
UpperCAmelCase = rename_state_dict_key(A , A )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCAmelCase = v.T
UpperCAmelCase = torch.from_numpy(A )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
UpperCAmelCase = mapping['''model.embed_positions.weight''']
UpperCAmelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCAmelCase , UpperCAmelCase = torch_model.load_state_dict(A , strict=A )
UpperCAmelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def get_tf_weights_as_numpy(path: str) -> dict:
    """Read all variables from a TF checkpoint into a {name: numpy array} dict."""
UpperCAmelCase = tf.train.list_variables(A )
UpperCAmelCase = {}
UpperCAmelCase = ['''global_step''']
for name, shape in tqdm(A , desc='''converting tf checkpoint to dict''' ):
UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase = tf.train.load_variable(A , A )
UpperCAmelCase = array
return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    """End-to-end conversion: TF checkpoint -> PyTorch model saved to `save_dir`."""
UpperCAmelCase = get_tf_weights_as_numpy(A )
UpperCAmelCase = convert_bigbird_pegasus(A , A )
torch_model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowercase : Union[str, Any] = parser.parse_args()
_lowercase : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 50
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 50
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = "speech_to_text"
__magic_name__ : int = ["past_key_values"]
__magic_name__ : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , lowerCAmelCase : int=10000 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Optional[int]=2048 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : str=4 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]="relu" , lowerCAmelCase : Optional[Any]=256 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Dict=2 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=0 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Dict=6000 , lowerCAmelCase : Optional[int]=1024 , lowerCAmelCase : Any=2 , lowerCAmelCase : int=(5, 5) , lowerCAmelCase : Dict=1024 , lowerCAmelCase : List[str]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : Any , )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
UpperCAmelCase = num_conv_layers
UpperCAmelCase = list(lowerCAmelCase )
UpperCAmelCase = conv_channels
UpperCAmelCase = input_feat_per_channel
UpperCAmelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
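# Hedged sketch of the `attribute_map` aliasing declared above: on a real
# Speech2TextConfig, the mapped names resolve to their targets, e.g.
#
#     from transformers import Speech2TextConfig
#     config = Speech2TextConfig()
#     assert config.hidden_size == config.d_model
#     assert config.num_attention_heads == config.encoder_attention_heads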
| 50
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 50
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
| 50
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
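# Hedged usage sketch beyond the unit tests above: a DisjunctiveConstraint can be
# handed to beam search so that one of the token sequences must appear in the
# output (model/input names are assumptions, not from this file):
#   dc = DisjunctiveConstraint([[1, 2, 4], [1, 2, 3, 4]])
#   outputs = model.generate(input_ids, constraints=[dc], num_beams=4)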
'''simple docstring'''
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return number | (1 << position)
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return number & ~(1 << position)
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return number ^ (1 << position)
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return ((number >> position) & 1) == 1
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
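# Hedged standalone sketch of the five operations above, written as plain
# expressions because the obfuscated signatures (duplicate parameter names) are
# not directly runnable:
number, position = 0b1010, 2
assert number | (1 << position) == 0b1110       # set bit 2
assert number & ~(1 << position) == 0b1010      # clear bit 2 (already 0)
assert number ^ (1 << position) == 0b1110       # flip bit 2
assert ((number >> position) & 1) == 0          # test bit 2 as bool-like int
assert int((number & (1 << position)) != 0) == 0  # test bit 2 via mask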
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__( lowerCAmelCase ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be downloaded even if it is already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
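# Hedged CLI sketch (the entry-point name `transformers-cli` is the standard one,
# an assumption here since this file only registers the subcommand):
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models --force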
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : str = """▁"""
_lowercase : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
_lowercase : Dict = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
_lowercase : Optional[int] = {"""vinai/bartpho-syllable""": 1024}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = VOCAB_FILES_NAMES
__magic_name__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int="<s>" , lowerCAmelCase : Dict="</s>" , lowerCAmelCase : Dict="</s>" , lowerCAmelCase : str="<s>" , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : List[Any]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : Optional[int] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = vocab_file
UpperCAmelCase = monolingual_vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase = {}
UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = cnt
cnt += 1
with open(lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
UpperCAmelCase = line.strip().split()[0]
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , lowerCAmelCase : str )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__( self : str , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase )) + [1]
return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1]
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : Any , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] )-> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def a__( self : Tuple , lowerCAmelCase : List[str] )-> Union[str, Any]:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = ''''''.join(lowerCAmelCase ).replace(lowerCAmelCase , ''' ''' ).strip()
return out_string
def a__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(lowerCAmelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
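# Hedged usage sketch (the upstream class is BartphoTokenizer, an assumption here;
# the checkpoint name comes from the constants above):
#   tok = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tok("Chúng tôi là những nghiên cứu viên.").input_ids
#   text = tok.decode(ids)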
'''simple docstring'''
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
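# Hedged standalone check of the base-26 scheme above (the helper name is
# hypothetical; the obfuscated signature with duplicate parameters is not
# directly runnable):
def _excel_title_to_int(title: str) -> int:
    result = 0
    for ch in title:
        result = result * 26 + (ord(ch) - 64)  # 'A' -> 1, ..., 'Z' -> 26
    return result
assert _excel_title_to_int("A") == 1
assert _excel_title_to_int("AB") == 28
assert _excel_title_to_int("ZY") == 701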
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowercase : Dict = get_logger(__name__)
def lowerCamelCase__ ( A : List[Any] , A : str , A : Dict , A : int , A : List[Any]=0 ):
'''simple docstring'''
os.makedirs(A , exist_ok=A )
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
UpperCAmelCase = os.path.join(A , A )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(A , A )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(A , A )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase = os.path.join(A , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(A , exist_ok=A )
logger.info(f"""Saving model to {ckpt_dir}""" )
UpperCAmelCase = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=A , storage_writer=dist_cp.FileSystemWriter(A ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def lowerCamelCase__ ( A : Optional[int] , A : List[str] , A : Union[str, Any] , A : Dict , A : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
UpperCAmelCase = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Loading model from {input_model_file}""" )
UpperCAmelCase = torch.load(A )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Loading model from {input_model_file}""" )
UpperCAmelCase = torch.load(A )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase = (
os.path.join(A , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
UpperCAmelCase = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=A , storage_reader=dist_cp.FileSystemReader(A ) , planner=DefaultLoadPlanner() , )
UpperCAmelCase = state_dict['''model''']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(A )
def lowerCamelCase__ ( A : str , A : Any , A : List[Any] , A : Dict , A : int , A : Optional[Any]=0 ):
'''simple docstring'''
os.makedirs(A , exist_ok=A )
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase = FSDP.optim_state_dict(A , A )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
UpperCAmelCase = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(A , A )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
UpperCAmelCase = os.path.join(A , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(A , exist_ok=A )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(A ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict , A : Union[str, Any] , A : Any , A : Tuple , A : int=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase = None
# the check below should work, but it currently doesn't (mostly a PyTorch issue);
# in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
UpperCAmelCase = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
UpperCAmelCase = torch.load(A )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
UpperCAmelCase = (
os.path.join(A , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
UpperCAmelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(A ) , )
UpperCAmelCase = optim_state['''optimizer''']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
UpperCAmelCase = FSDP.optim_state_dict_to_load(A , A , A )
optimizer.load_state_dict(A )
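# Hedged summary of the branches implemented above: FULL_STATE_DICT gathers the
# complete model/optimizer state and writes a single .bin file from rank 0;
# LOCAL_STATE_DICT writes one .bin shard per rank; SHARDED_STATE_DICT writes a
# torch.distributed.checkpoint directory via FileSystemWriter/FileSystemReader.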
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
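# Hedged inference sketch mirroring the slow test above (checkpoint name taken
# from the test itself):
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(**processor(images=image, return_tensors="np")).logits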
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[Any] = "philschmid/bart-large-cnn-samsum"
__magic_name__ : Any = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
__magic_name__ : Optional[int] = "summarizer"
__magic_name__ : Optional[Any] = AutoTokenizer
__magic_name__ : Dict = AutoModelForSeqaSeqLM
__magic_name__ : Any = ["text"]
__magic_name__ : str = ["text"]
def a__( self : Optional[int] , lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return self.pre_processor(lowerCAmelCase , return_tensors='''pt''' , truncation=lowerCAmelCase )
def a__( self : str , lowerCAmelCase : List[Any] )-> Optional[Any]:
"""simple docstring"""
return self.model.generate(**lowerCAmelCase )[0]
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
return self.pre_processor.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
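# Hedged usage sketch (the upstream class name is TextSummarizationTool, an
# assumption here since this file obfuscates it):
#   tool = TextSummarizationTool()
#   summary = tool("A long English text to be summarized ...")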
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
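# Hedged usage sketch (mirrors the upstream MBart50Tokenizer; the class name is
# an assumption since this file obfuscates it):
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria",
#               return_tensors="pt")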
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Tuple = logging.get_logger(__name__)
def lowerCamelCase__ ( A : Optional[int] , A : int=False , A : Dict=False , A : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def lowerCamelCase__ ( A : int , A : Tuple ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
UpperCAmelCase = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def lowerCamelCase__ ( A : Optional[Any] , A : Optional[Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = dct.pop(A )
UpperCAmelCase = val
@torch.no_grad()
def lowerCamelCase__ ( A : List[str] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=A )
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
if "vqa" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = 31_29
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''vqa2-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = 2
UpperCAmelCase = {0: '''False''', 1: '''True'''}
UpperCAmelCase = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase = 3
UpperCAmelCase = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
UpperCAmelCase = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
UpperCAmelCase , UpperCAmelCase = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
UpperCAmelCase = ViltImageProcessor(size=3_84 )
UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCAmelCase = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
UpperCAmelCase = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
UpperCAmelCase = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCAmelCase = processor(A , A , return_tensors='''pt''' )
UpperCAmelCase = processor(A , A , return_tensors='''pt''' )
UpperCAmelCase = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCAmelCase = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
UpperCAmelCase = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCAmelCase = '''How many cats are there?'''
UpperCAmelCase = processor(A , A , return_tensors='''pt''' )
UpperCAmelCase = model(**A )
# Verify outputs
if mlm_model:
UpperCAmelCase = torch.Size([1, 11, 3_05_22] )
UpperCAmelCase = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase = torch.Size([1, 31_29] )
UpperCAmelCase = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
UpperCAmelCase = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase = torch.Size([1, 2] )
UpperCAmelCase = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowercase : List[str] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
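# Hedged invocation sketch (the script filename is an assumption):
#   python convert_vilt_original_to_pytorch.py \
#     --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#     --pytorch_dump_folder_path ./vilt-b32-mlm-itm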
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
'''simple docstring'''
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def lowerCamelCase__ ( ):
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
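# Hedged standalone sketch: the tuple-count trick above is equivalent to a plain
# boolean OR coerced to int.
for a, b in ((0, 0), (0, 1), (1, 0), (1, 1)):
    assert int((a, b).count(1) != 0) == int(bool(a) or bool(b))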
'''simple docstring'''
import functools
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase = set(A )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
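# Hedged expected behaviour (the canonical LeetCode 983 example; the obfuscated
# signature above is not directly runnable):
#   days = [1, 4, 6, 7, 8, 20], costs = [2, 7, 15]  ->  11
#   (a 1-day pass on day 1, a 7-day pass covering days 4-10, a 1-day pass on day 20)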
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_lowercase : Any = datasets.utils.logging.get_logger(__name__)
_lowercase : Optional[int] = ["""names""", """prefix"""]
_lowercase : Any = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
_lowercase : Any = ["""encoding_errors""", """on_bad_lines"""]
_lowercase : str = ["""date_format"""]
@dataclass
class UpperCamelCase__( datasets.BuilderConfig ):
__magic_name__ : str = ","
__magic_name__ : Optional[str] = None
__magic_name__ : Optional[Union[int, List[int], str]] = "infer"
__magic_name__ : Optional[List[str]] = None
__magic_name__ : Optional[List[str]] = None
__magic_name__ : Optional[Union[int, str, List[int], List[str]]] = None
__magic_name__ : Optional[Union[List[int], List[str]]] = None
__magic_name__ : Optional[str] = None
__magic_name__ : bool = True
__magic_name__ : Optional[Literal["c", "python", "pyarrow"]] = None
__magic_name__ : Dict[Union[int, str], Callable[[Any], Any]] = None
__magic_name__ : Optional[list] = None
__magic_name__ : Optional[list] = None
__magic_name__ : bool = False
__magic_name__ : Optional[Union[int, List[int]]] = None
__magic_name__ : Optional[int] = None
__magic_name__ : Optional[Union[str, List[str]]] = None
__magic_name__ : bool = True
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = True
__magic_name__ : Optional[str] = None
__magic_name__ : str = "."
__magic_name__ : Optional[str] = None
__magic_name__ : str = '"'
__magic_name__ : int = 0
__magic_name__ : Optional[str] = None
__magic_name__ : Optional[str] = None
__magic_name__ : Optional[str] = None
__magic_name__ : Optional[str] = None
__magic_name__ : bool = True
__magic_name__ : bool = True
__magic_name__ : int = 0
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : Optional[str] = None
__magic_name__ : int = 1_0000
__magic_name__ : Optional[datasets.Features] = None
__magic_name__ : Optional[str] = "strict"
__magic_name__ : Literal["error", "warn", "skip"] = "error"
__magic_name__ : Optional[str] = None
def a__( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
if self.delimiter is not None:
UpperCAmelCase = self.delimiter
if self.column_names is not None:
UpperCAmelCase = self.column_names
@property
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value;
# some others are deprecated, so we also skip them when they equal the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class UpperCamelCase__( datasets.ArrowBasedBuilder ):
__magic_name__ : Any = CsvConfig
def a__( self : List[Any] )-> Tuple:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def a__( self : int , lowerCAmelCase : Optional[int] )-> int:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase , (str, list, tuple) ):
UpperCAmelCase = data_files
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = [files]
UpperCAmelCase = [dl_manager.iter_files(lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
UpperCAmelCase = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = [files]
UpperCAmelCase = [dl_manager.iter_files(lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase , gen_kwargs={'''files''': files} ) )
return splits
def a__( self : List[str] , lowerCAmelCase : pa.Table )-> pa.Table:
"""simple docstring"""
if self.config.features is not None:
UpperCAmelCase = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
UpperCAmelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
UpperCAmelCase = table_cast(lowerCAmelCase , lowerCAmelCase )
return pa_table
def a__( self : List[Any] , lowerCAmelCase : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
UpperCAmelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase ) ):
UpperCAmelCase = pd.read_csv(lowerCAmelCase , iterator=lowerCAmelCase , dtype=lowerCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase ):
UpperCAmelCase = pa.Table.from_pandas(lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase )}: {e}""" )
raise
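# Minimal usage sketch (an illustrative addition, not part of the original module):
# this builder is what `datasets.load_dataset("csv", ...)` dispatches to, and any
# `CsvConfig` field can be forwarded as a keyword argument. "my_data.csv" is a
# hypothetical placeholder path.
#
#     import datasets
#
#     dataset = datasets.load_dataset("csv", data_files={"train": "my_data.csv"}, sep=",")
#     print(dataset["train"].features)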
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
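# Illustrative note on the lazy-import pattern above (an addition, hedged): importing
# the package stays cheap because `_LazyModule` defers loading `modeling_gpt_bigcode`
# (and hence torch) until one of its attributes is actually accessed, e.g.:
#
#     from transformers import GPTBigCodeConfig  # config only, no modeling import yet
#     config = GPTBigCodeConfig(n_layer=2)       # triggers just the configuration module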
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]=13 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Tuple=24 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : int=True , lowerCAmelCase : int=32 , lowerCAmelCase : Optional[Any]=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : List[str]=10 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : List[str]=None , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : int=2 , )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = patch_size
UpperCAmelCase = max_length
UpperCAmelCase = num_mel_bins
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = frequency_stride
UpperCAmelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase = frequency_out_dimension * time_out_dimension
UpperCAmelCase = num_patches + 2
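        # e.g. with the defaults above (num_mel_bins=16, patch_size=2, frequency_stride=2,
        # max_length=24, time_stride=2): (16 - 2) // 2 + 1 = 8 frequency patches and
        # (24 - 2) // 2 + 1 = 12 time patches, so num_patches = 96 and seq_length = 98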
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, input_values, labels
def a__( self : int )-> List[str]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def a__( self : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = ASTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__magic_name__ : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__magic_name__ : Any = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
__magic_name__ : List[Any] = False
__magic_name__ : Any = False
__magic_name__ : Any = False
__magic_name__ : Any = False
    def a__( self : List[str] , pipeline_test_casse_name : Optional[int] , config_class : int , model_architecture : Union[str, Any] , tokenizer_name : Union[str, Any] , processor_name : Dict )-> Optional[int]:
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = ASTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def a__( self : Optional[int] )-> int:
"""simple docstring"""
pass
def a__( self : str )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@slow
def a__( self : Optional[Any] )-> Dict:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = ASTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def prepare_audio( ):
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest( unittest.TestCase ):
@cached_property
def a__( self : Any )-> Tuple:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.default_feature_extractor
UpperCAmelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCAmelCase )
UpperCAmelCase = self.default_feature_extractor
UpperCAmelCase , UpperCAmelCase = prepare_audio()
UpperCAmelCase = audio.squeeze().numpy()
UpperCAmelCase = feature_extractor(lowerCAmelCase , sampling_rate=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
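# Standalone inference sketch (illustrative; it mirrors the integration test above
# rather than adding new behavior -- the silent 1 s waveform is a placeholder input):
#
#     import torch
#     from transformers import ASTFeatureExtractor, ASTForAudioClassification
#
#     extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#     inputs = extractor(torch.zeros(16000).numpy(), sampling_rate=16000, return_tensors="pt")
#     with torch.no_grad():
#         predicted_id = model(**inputs).logits.argmax(-1).item()
#     print(model.config.id2label[predicted_id])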
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments( path : Optional[Any] , n_shave_prefix_segments : Tuple=1 ):
    '''simple docstring'''
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
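# For example (illustrative): shave_segments("input_blocks.3.0.op.weight", 2) drops the
# first two segments and returns "0.op.weight"; a negative value removes trailing
# segments instead, e.g. n_shave_prefix_segments=-1 yields "input_blocks.3.0.op".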
def renew_resnet_paths( old_list : int , n_shave_prefix_segments : Optional[Any]=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths( old_list : Any , n_shave_prefix_segments : int=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
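# Example invocation (hypothetical paths; the script file name is also an assumption):
#
#     python convert_ldm_original_checkpoint_to_diffusers.py \
#         --checkpoint_path ./ldm/model.ckpt \
#         --config_file ./ldm/config.json \
#         --dump_path ./converted_model
#
# If a scheduler and VQModel can be loaded from the checkpoint's directory, a full
# LDMPipeline is saved; otherwise only the converted UNet weights are written.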
'''simple docstring'''
def is_pentagonal( A : int ):
    '''simple docstring'''
    root = (1 + 24 * A) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution( A : int = 50_00 ):
    '''simple docstring'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
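# Worked example (illustrative): pentagonal numbers are P(n) = n * (3n - 1) / 2, so
# P(4) = 22. Inverting gives n = (1 + sqrt(1 + 24 * 22)) / 6 = (1 + 23) / 6 = 4, an
# integer, hence is_pentagonal(22) is True; for 23, sqrt(1 + 24 * 23) = sqrt(553) is
# irrational and the check fails.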
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig( PretrainedConfig ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
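# Minimal usage sketch (an illustrative addition): the config defaults mirror
# BERT-base; a non-zero projection_dim adds a projection on the pooled output.
#
#     from transformers import DPRConfig, DPRQuestionEncoder
#
#     config = DPRConfig(projection_dim=128)
#     model = DPRQuestionEncoder(config)  # randomly initialized weights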
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask( size : Dict , overlap_pixels : Dict , remove_borders : Optional[int]=[] ):
    '''simple docstring'''
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
    mask = np.pad(mask , mode='''linear_ramp''' , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp( n : List[Any] , smallest : Union[str, Any] , largest : Union[str, Any] ):
    '''simple docstring'''
    return max(smallest , min(n , largest ) )
def clamp_rect( rect : [int] , min : [int] , max : [int] ):
    '''simple docstring'''
    return (
        clamp(rect[0] , min[0] , max[0] ),
        clamp(rect[1] , min[1] , max[1] ),
        clamp(rect[2] , min[0] , max[0] ),
        clamp(rect[3] , min[1] , max[1] ),
    )
def add_overlap_rect( rect : [int] , overlap : int , image_size : [int] ):
    '''simple docstring'''
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
def squeeze_tile( tile : Tuple , original_image : List[str] , original_slice : List[str] , slice_x : Dict ):
    '''simple docstring'''
    result = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile( tile : str , original_image_slice : Union[str, Any] ):
    '''simple docstring'''
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def lowerCamelCase__ ( n : Dict , d : Union[str, Any] ):
    '''simple docstring'''
    divisor = n % d
    return n - divisor
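# For example (illustrative): with n = 137 and d = 32 the helper above returns
# 137 - (137 % 32) = 128, i.e. n rounded down to the nearest multiple of d.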
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
def __init__( self : Dict , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : DDPMScheduler , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : int = 350 , )-> List[Any]:
"""simple docstring"""
super().__init__(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , low_res_scheduler=lowerCAmelCase , scheduler=lowerCAmelCase , max_noise_level=lowerCAmelCase , )
def a__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
UpperCAmelCase = add_overlap_rect(lowerCAmelCase , lowerCAmelCase , image.size )
UpperCAmelCase = image.crop(lowerCAmelCase )
UpperCAmelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCAmelCase = translated_slice_x - (original_image_slice / 2)
UpperCAmelCase = max(0 , lowerCAmelCase )
UpperCAmelCase = squeeze_tile(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = to_input.size
UpperCAmelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
UpperCAmelCase = super(lowerCAmelCase , self ).__call__(image=lowerCAmelCase , **lowerCAmelCase ).images[0]
UpperCAmelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
UpperCAmelCase = unsqueeze_tile(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
UpperCAmelCase = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
UpperCAmelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCAmelCase ) , mode='''L''' , )
final_image.paste(
lowerCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCAmelCase )
@torch.no_grad()
def __call__( self : str , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowerCAmelCase : int = 75 , lowerCAmelCase : float = 9.0 , lowerCAmelCase : int = 50 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 128 , lowerCAmelCase : int = 32 , lowerCAmelCase : int = 32 , )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
UpperCAmelCase = math.ceil(image.size[0] / tile_size )
UpperCAmelCase = math.ceil(image.size[1] / tile_size )
UpperCAmelCase = tcx * tcy
UpperCAmelCase = 0
for y in range(lowerCAmelCase ):
for x in range(lowerCAmelCase ):
self._process_tile(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , prompt=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , noise_level=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def main( ):
    '''simple docstring'''
    model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision='''fp16''' , torch_dtype=torch.float16 )
    pipe = pipe.to('''cuda''' )
    image = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
    def callback(obj : Optional[Any] ):
        print(f"""progress: {obj['progress']:.4f}""" )
        obj['''image'''].save('''diffusers_library_progress.jpg''' )
    final_image = pipe(image=image , prompt='''Black font, white background, vector''' , noise_level=40 , callback=callback )
    final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"""
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
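# Example invocation (hypothetical paths and hyperparameters, shown only to make the
# flag constraints above concrete -- note --mlm requires alpha_mlm > 0 and alpha_clm == 0):
#
#     python train.py \
#         --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#         --token_counts data/token_counts.pickle \
#         --data_file data/binarized_text.pickle \
#         --dump_path serialization_dir/my_first_distillation --force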
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
__magic_name__ : Optional[Any] = PegasusConfig
__magic_name__ : Union[str, Any] = {}
__magic_name__ : List[Any] = "gelu"
def __init__( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=13 , lowerCAmelCase : Any=7 , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=99 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Tuple=37 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Union[str, Any]=40 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Union[str, Any]=1 , lowerCAmelCase : List[str]=0 , )-> Tuple:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase = prepare_pegasus_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def a__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = TFPegasusModel(config=lowerCAmelCase ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = inputs_dict['''head_mask''']
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
UpperCAmelCase = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 )
def lowerCamelCase__ ( A : List[Any] , A : Union[str, Any] , A : int , A : str=None , A : List[Any]=None , A : List[Any]=None , A : Union[str, Any]=None , A : Optional[int]=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
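# Note (illustrative): when masks are omitted, the helper above derives them from the
# padding token; e.g. with pad_token_id = 0, input_ids [[5, 9, 0]] yields
# attention_mask [[1, 1, 0]], and all head masks default to ones (no heads pruned).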
@require_tf
class TFPegasusModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__magic_name__ : List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__magic_name__ : List[str] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__magic_name__ : str = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__magic_name__ : List[str] = True
__magic_name__ : List[str] = False
__magic_name__ : Optional[int] = False
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = TFPegasusModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase )
def a__( self : str )-> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__( self : Optional[int] )-> int:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests( unittest.TestCase ):
__magic_name__ : Union[str, Any] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__magic_name__ : Optional[Any] = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__magic_name__ : Optional[int] = "google/pegasus-xsum"
@cached_property
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def a__( self : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def a__( self : str , **lowerCAmelCase : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.translate_src_text(**lowerCAmelCase )
assert self.expected_text == generated_words
def a__( self : List[Any] , **lowerCAmelCase : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(self.src_text , **lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''tf''' )
UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase , )
UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase )
return generated_words
@slow
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
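# Standalone summarization sketch (illustrative; it mirrors the integration test above):
#
#     from transformers import AutoTokenizer, TFPegasusForConditionalGeneration
#
#     tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#     model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#     batch = tokenizer(["PG&E stated it scheduled the blackouts ..."], padding=True, return_tensors="tf")
#     ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#     print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True))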
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor( ProcessorMixin ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
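# Minimal usage sketch (hedged): AutoProcessor and the keyword names below follow the
# __call__ defined above; the checkpoint name is a placeholder, not a verified repo.
#
#   from transformers import AutoProcessor
#   from PIL import Image
#
#   processor = AutoProcessor.from_pretrained("<clipseg-style-checkpoint>")
#   inputs = processor(text=["a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # -> tokenizer fields plus "pixel_values" from the image processor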
| 50
| 1
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
_lowercase : List[Any] = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
_lowercase : int = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
_lowercase : List[str] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
_lowercase : List[Any] = F"""down_blocks.{i}.resnets.{j}."""
_lowercase : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
_lowercase : int = F"""down_blocks.{i}.attentions.{j}."""
_lowercase : Tuple = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
_lowercase : List[str] = F"""up_blocks.{i}.resnets.{j}."""
_lowercase : Any = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
_lowercase : Optional[Any] = F"""up_blocks.{i}.attentions.{j}."""
_lowercase : Optional[int] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
_lowercase : Dict = F"""down_blocks.{i}.downsamplers.0.conv."""
_lowercase : Tuple = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
_lowercase : Union[str, Any] = F"""up_blocks.{i}.upsamplers.0."""
_lowercase : Tuple = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
_lowercase : Tuple = """mid_block.attentions.0."""
_lowercase : Optional[Any] = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
_lowercase : int = F"""mid_block.resnets.{j}."""
_lowercase : Any = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
UpperCAmelCase = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase = v.replace(hf_part , sd_part )
UpperCAmelCase = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase = v.replace(hf_part , sd_part )
UpperCAmelCase = v
UpperCAmelCase = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
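# Illustrative key mapping produced by the tables above (direction: HF Diffusers -> SD):
#   "down_blocks.0.resnets.0.norm1.weight" -> "input_blocks.1.0.in_layers.0.weight"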
# ================#
# VAE Conversion #
# ================#
_lowercase : Any = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_lowercase : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
_lowercase : Optional[int] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_lowercase : Union[str, Any] = F"""down_blocks.{i}.downsamplers.0."""
_lowercase : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_lowercase : Optional[int] = F"""up_blocks.{i}.upsamplers.0."""
_lowercase : Union[str, Any] = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_lowercase : Tuple = F"""decoder.up_blocks.{i}.resnets.{j}."""
_lowercase : int = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_lowercase : Any = F"""mid_block.resnets.{i}."""
_lowercase : List[Any] = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
_lowercase : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def lowerCamelCase__ ( A : Dict ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
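# e.g. a (512, 512) linear attention projection becomes (512, 512, 1, 1), matching the
# 1x1-conv layout of the original SD VAE checkpoints (sizes are illustrative).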
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase = v.replace(hf_part , sd_part )
UpperCAmelCase = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase = v.replace(hf_part , sd_part )
UpperCAmelCase = v
UpperCAmelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase = reshape_weight_for_sd(v )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
_lowercase : Union[str, Any] = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
_lowercase : int = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_lowercase : str = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_lowercase : Dict = {"""q""": 0, """k""": 1, """v""": 2}
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = {}
UpperCAmelCase = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
UpperCAmelCase = k[: -len('''.q_proj.weight''' )]
UpperCAmelCase = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase = [None, None, None]
UpperCAmelCase = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
UpperCAmelCase = k[: -len('''.q_proj.bias''' )]
UpperCAmelCase = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase = [None, None, None]
UpperCAmelCase = v
continue
UpperCAmelCase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
UpperCAmelCase = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
UpperCAmelCase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
UpperCAmelCase = torch.cat(tensors )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
UpperCAmelCase = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
UpperCAmelCase = torch.cat(tensors )
return new_state_dict
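# Note (hedged): the torch.cat calls above fuse the separate q/k/v projections, each of
# shape (d, d) for weights or (d,) for biases, into single (3*d, ...) tensors, the fused
# in_proj layout used by the original OpenCLIP-style text encoder checkpoints.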
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
_lowercase : Any = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_lowercase : Optional[Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
_lowercase : Union[str, Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
_lowercase : Optional[Any] = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load each model from safetensors if available, otherwise from the PyTorch .bin file
if osp.exists(unet_path):
_lowercase : List[Any] = load_file(unet_path, device="""cpu""")
else:
_lowercase : List[Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
_lowercase : int = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
_lowercase : Tuple = load_file(vae_path, device="""cpu""")
else:
_lowercase : int = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
_lowercase : Union[str, Any] = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
_lowercase : Union[str, Any] = load_file(text_enc_path, device="""cpu""")
else:
_lowercase : List[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
_lowercase : int = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
_lowercase : Optional[int] = convert_unet_state_dict(unet_state_dict)
_lowercase : Dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_lowercase : Dict = convert_vae_state_dict(vae_state_dict)
_lowercase : Tuple = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# The easiest way to identify a v2.0 model seems to be that its text encoder (OpenCLIP) is deeper
_lowercase : Any = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_lowercase : Any = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
_lowercase : Dict = convert_text_enc_state_dict_vaa(text_enc_dict)
_lowercase : Dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
_lowercase : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
_lowercase : Any = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_lowercase : Tuple = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_lowercase : Tuple = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_lowercase : Dict = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
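# Example invocation (script name and paths are placeholders):
#   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors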
| 50
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
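# With the lazy module in place, an import such as `from transformers import MaskFormerConfig`
# resolves the name on first access; the torch-dependent modeling files stay unimported
# until one of their exported names is actually touched.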
| 50
| 1
|
'''simple docstring'''
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
UpperCAmelCase = len(A )
print('''The following activities are selected:''' )
# The first activity is always selected
UpperCAmelCase = 0
print(A , end=''',''' )
# Consider rest of the activities
for j in range(A ):
# If this activity's start time is greater than
# or equal to the finish time of the previously
# selected activity, then select it
if start[j] >= finish[i]:
print(A , end=''',''' )
UpperCAmelCase = j
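# Note: this greedy selection assumes the activities are already sorted by finish
# time; the sample input below satisfies that. Illustrative run:
#   print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])  # prints: 0,1,3,4,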
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Tuple = [1, 3, 0, 5, 8, 5]
_lowercase : Any = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 50
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels', which is the column name the models of the
# transformers library expect
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: if you are using a custom `Tracker` class, pass an instance of it in here, e.g.:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=A , default='''logs''' , help='''Location where experiment tracking logs and relevant project information are stored.''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
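# Typical launches (illustrative):
#   python this_script.py --with_tracking --project_dir runs/mrpc
#   accelerate launch this_script.py --mixed_precision fp16 --with_tracking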
| 50
| 1
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCamelCase__ ( A : str = "AAPL" ):
'''simple docstring'''
UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
UpperCAmelCase = BeautifulSoup(requests.get(A ).text , '''html.parser''' )
UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
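# Note: the CSS class above is tied to Yahoo Finance's markup at the time of writing
# and can break without notice; treat this scraper as illustrative only.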
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 50
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
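# Worked defaults (hedged; upstream this class ships as EncodecConfig, model_type "encodec"):
#   hop_length   = prod([8, 5, 4, 2]) = 320
#   frame_rate   = ceil(24000 / 320) = 75
#   last property: int(1000 * 24.0 // (75 * 10)) = 32 quantizer codebooks
#   with chunk_length_s=1.0 and overlap=0.01:
#     chunk_length = int(1.0 * 24000) = 24000 samples
#     chunk_stride = max(1, int(0.99 * 24000)) = 23760 samples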
| 50
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_lowercase : Dict = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_lowercase : Tuple = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase__ ( A : list[list[int]] ):
'''simple docstring'''
UpperCAmelCase = []
for i in range(len(A ) ):
UpperCAmelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
UpperCAmelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
UpperCAmelCase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A )
return next_generation
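# Quick illustrative check with the blinker defined above:
#   new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
#   # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]  (the vertical blinker flips horizontal)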
def lowerCamelCase__ ( A : list[list[int]] , A : int ):
'''simple docstring'''
UpperCAmelCase = []
for _ in range(A ):
# Create output image
UpperCAmelCase = Image.new('''RGB''' , (len(cells[0] ), len(A )) )
UpperCAmelCase = img.load()
# Save cells to image
for x in range(len(A ) ):
for y in range(len(cells[0] ) ):
UpperCAmelCase = 2_55 - cells[y][x] * 2_55
UpperCAmelCase = (colour, colour, colour)
# Save image
images.append(A )
UpperCAmelCase = new_generation(A )
return images
if __name__ == "__main__":
_lowercase : int = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 50
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 1
|
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCamelCase__ ( A : bool = True , *A : Tuple , **A : Tuple ):
'''simple docstring'''
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
UpperCAmelCase = False
if main_process_only:
UpperCAmelCase = PartialState().local_process_index != 0
return _tqdm(*A , **A , disable=A )
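# Usage sketch based on the signature above (assumes this helper is exported as `tqdm`):
#   for batch in tqdm(True, dataloader):  # progress bar on the local main process only
#       ...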
| 50
|
'''simple docstring'''
import heapq
def lowerCamelCase__ ( A : dict ):
'''simple docstring'''
UpperCAmelCase = []
# for each node and its adjacency list, push the node and its rank onto the queue
# using the heapq module the queue is filled like a priority queue
# heapq implements a min-priority queue, so -1 * len(value) is used to make it behave like a max-priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(queue , [-1 * len(value ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase = set()
# while the queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract the vertex with max rank from the queue and add it to chosen_vertices
UpperCAmelCase = heapq.heappop(queue )[1][0]
chosen_vertices.add(argmax )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem,
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase = elem[1][1].index(argmax )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(queue )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 1
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[int] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
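# The invariant these tests pin down (summarized, hedged): a file list is considered
# safetensors-compatible when every PyTorch weight file (.bin) has a .safetensors
# counterpart; a single missing counterpart fails the check and the pipeline falls
# back to loading the .bin weights.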
| 50
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Remove folders whose examples are no longer actively maintained from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
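# Typical invocations (file path is illustrative):
#   python utils/release.py                  # pre-release: bump to the next minor version
#   python utils/release.py --patch          # pre-release: bump the patch version
#   python utils/release.py --post_release   # after release: move back to a .dev0 version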
| 50
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Tuple = 16
_lowercase : Tuple = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : Any ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels', which is the column name the models of the
# transformers library expect
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Dict , A : int ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize accelerator
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# Now we train the model
for epoch in range(A ):
model.train()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(A ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 50
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
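# A hedged usage sketch, outside the tests above: in generation these criteria are
# passed to `generate` through the `stopping_criteria` argument, and generation
# halts as soon as any criterion in the list returns True. The checkpoint name
# below is an illustrative placeholder.
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tokenizer("Hello", return_tensors="pt")
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=1.0)])
# output_ids = model.generate(**inputs, stopping_criteria=criteria)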
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__( self : List[Any] , **lowerCAmelCase : int )-> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__( self : List[str] )-> str:
"""simple docstring"""
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def a__( self : str )-> str:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = processor(images=lowerCAmelCase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase )
UpperCAmelCase = [torch.ones((1, 3, 5, 5) )]
UpperCAmelCase = [[1764, 2646]]
UpperCAmelCase = [[683, 1024]]
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase , torch.tensor(lowerCAmelCase ) , torch.tensor(lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(lowerCAmelCase ):
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) )
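# A rough sketch (stated as an assumption, not the literal implementation) of what
# the upsampling inside `post_process_masks` amounts to: interpolating each
# low-resolution mask back up to its original image size. The real method also
# routes through `reshaped_input_sizes` to undo the processor's resizing/padding.
def _toy_upsample(mask, original_size):
    import torch.nn.functional as F

    return F.interpolate(mask, size=tuple(original_size), mode="bilinear", align_corners=False)
# _toy_upsample(torch.ones(1, 3, 5, 5), (1764, 2646)).shape == torch.Size([1, 3, 1764, 2646])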
@require_vision
@require_tf
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> str:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__( self : Any , **lowerCAmelCase : Union[str, Any] )-> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__( self : Any )-> List[str]:
"""simple docstring"""
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = processor(images=lowerCAmelCase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase )
UpperCAmelCase = [tf.ones((1, 3, 5, 5) )]
UpperCAmelCase = [[1764, 2646]]
UpperCAmelCase = [[683, 1024]]
UpperCAmelCase = processor.post_process_masks(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase , tf.convert_to_tensor(lowerCAmelCase ) , tf.convert_to_tensor(lowerCAmelCase ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase , np.array(lowerCAmelCase ) , np.array(lowerCAmelCase ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__( self : str , **lowerCAmelCase : Optional[int] )-> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase ).image_processor
def a__( self : List[Any] )-> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a__( self : int )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase )
UpperCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
UpperCAmelCase = [tf.convert_to_tensor(lowerCAmelCase )]
UpperCAmelCase = [torch.tensor(lowerCAmelCase )]
UpperCAmelCase = [[1764, 2646]]
UpperCAmelCase = [[683, 1024]]
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , return_tensors='''tf''' )
UpperCAmelCase = processor.post_process_masks(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=lowerCAmelCase )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(lowerCAmelCase , return_tensors='''pt''' )['''pixel_values'''].numpy()
UpperCAmelCase = processor(images=lowerCAmelCase , return_tensors='''pt''' )['''pixel_values'''].numpy()
UpperCAmelCase = image_processor(lowerCAmelCase , return_tensors='''tf''' )['''pixel_values'''].numpy()
UpperCAmelCase = processor(images=lowerCAmelCase , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
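# A hedged illustration of the dummy-object pattern above: touching the class
# without `note_seq` installed raises an ImportError from `requires_backends`,
# pointing users at the missing dependency instead of failing later with an
# opaque AttributeError.
# try:
#     MidiProcessor()  # class name illustrative; every dummy behaves the same
# except ImportError as err:
#     print(err)  # message explains that note_seq must be installed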
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : int = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Union[str, Any] = "gpt_bigcode"
__magic_name__ : List[Any] = ["past_key_values"]
__magic_name__ : Optional[Any] = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any , lowerCAmelCase : Any=50257 , lowerCAmelCase : Dict=1024 , lowerCAmelCase : str=768 , lowerCAmelCase : Any=12 , lowerCAmelCase : str=12 , lowerCAmelCase : str=None , lowerCAmelCase : Optional[int]="gelu_pytorch_tanh" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=1E-5 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[Any]=50256 , lowerCAmelCase : List[Any]=50256 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , **lowerCAmelCase : Tuple , )-> int:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = n_inner
UpperCAmelCase = activation_function
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = scale_attn_weights
UpperCAmelCase = use_cache
UpperCAmelCase = attention_softmax_in_fp32
UpperCAmelCase = scale_attention_softmax_in_fp32
UpperCAmelCase = multi_query
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
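# A minimal usage sketch: thanks to the attribute map declared above, the
# GPT-2-style constructor arguments stay addressable under the canonical names
# (the class name and values below are illustrative; values are the signature
# defaults).
# config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
# assert config.hidden_size == 768       # resolved through "hidden_size" -> "n_embd"
# assert config.num_hidden_layers == 12  # resolved through "num_hidden_layers" -> "n_layer"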
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( A : List[Any] , A : int , A : List[str] , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = BigBirdConfig.from_json_file(A )
print(f"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
UpperCAmelCase = BigBirdForQuestionAnswering(A )
else:
UpperCAmelCase = BigBirdForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(A , A , is_trivia_qa=A )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
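# Example invocation (script filename and all paths are placeholders):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump \
#       --is_trivia_qa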
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int=7 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Optional[int]=18 , lowerCAmelCase : Union[str, Any]=30 , lowerCAmelCase : List[Any]=400 , lowerCAmelCase : Any=True , lowerCAmelCase : int=None , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=True , lowerCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase : int=[0.5, 0.5, 0.5] , lowerCAmelCase : Optional[Any]=False , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = do_reduce_labels
def a__( self : Any )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase = Image.open(dataset[0]['''file'''] )
UpperCAmelCase = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase = Image.open(ds[0]['''file'''] )
UpperCAmelCase = Image.open(ds[1]['''file'''] )
UpperCAmelCase = Image.open(ds[2]['''file'''] )
UpperCAmelCase = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = BeitImageProcessingTester(self )
@property
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase )
UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCAmelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase )
def a__( self : Tuple )-> str:
"""simple docstring"""
pass
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a__( self : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a__( self : Any )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
UpperCAmelCase = []
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
UpperCAmelCase , UpperCAmelCase = prepare_semantic_single_inputs()
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
UpperCAmelCase , UpperCAmelCase = prepare_semantic_batch_inputs()
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase , UpperCAmelCase = prepare_semantic_single_inputs()
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
UpperCAmelCase = True
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
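# A hedged sketch of the reduce-labels convention the last assertions rely on
# (an assumption about the documented behaviour, not the literal implementation):
# background id 0 becomes the ignore index 255 and all other ids shift down by
# one, which is how reduced ADE20k labels can reach 255.
def _toy_reduce_labels(segmentation_map):
    reduced = segmentation_map.copy()
    reduced[reduced == 0] = 255  # background -> ignore index
    reduced = reduced - 1
    reduced[reduced == 254] = 255  # keep the ignore index stable after the shift
    return reduced
# _toy_reduce_labels(np.array([0, 1, 150])) -> array([255, 0, 149])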
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
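# A hedged summary of the update protocol exercised above: `update(token_id)`
# returns a (stepped, completed, reset) triple -- `stepped` means the token
# advanced one of the candidate sequences, `completed` means a full candidate was
# matched, and `reset` means the token broke every partial match so progress
# started over.
# dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
# dc.update(1)  # (True, False, False), current_seq == [1]
# dc.update(2)  # (True, False, False), current_seq == [1, 2]
# dc.update(4)  # (True, True, False), the [1, 2, 4] branch is satisfied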
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class UpperCamelCase__:
def a__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] )-> Optional[int]:
"""simple docstring"""
pass
def a__( self : Dict )-> Any:
"""simple docstring"""
pass
def a__( self : List[Any] )-> Any:
"""simple docstring"""
pass
def a__( self : Union[str, Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : np.ndarray , lowerCAmelCase : float )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase , lowerCAmelCase , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def a__( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=None , **lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def a__( self : int , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[Any]=None , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
UpperCAmelCase = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a__( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=None , **lowerCAmelCase : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
UpperCAmelCase = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
UpperCAmelCase = model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase = after_output[0]
UpperCAmelCase = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(lowerCAmelCase , 1E-3 )
def a__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any]=None , **lowerCAmelCase : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
UpperCAmelCase = model(
input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , output_attentions=lowerCAmelCase )
UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = to_atuple(vision_model.config.image_size )
UpperCAmelCase = to_atuple(vision_model.config.patch_size )
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
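# Worked example of the seq_len arithmetic checked above (tiny-tester defaults are
# an assumption): with image_size 30 and patch_size 2 there are
# (30 // 2) * (30 // 2) = 225 patches, so seq_len = 225 + 1 = 226 once the [CLS]
# token is prepended.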
def a__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : List[Any] )-> Any:
"""simple docstring"""
pt_model.to(lowerCAmelCase )
pt_model.eval()
# prepare inputs
UpperCAmelCase = inputs_dict
UpperCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase = pt_model(**lowerCAmelCase ).to_tuple()
UpperCAmelCase = fx_model(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase )
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase , from_pt=lowerCAmelCase )
UpperCAmelCase = fx_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase )
UpperCAmelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase , from_flax=lowerCAmelCase )
pt_model_loaded.to(lowerCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase = pt_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase , pt_output_loaded.numpy() , 4E-2 )
def a__( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = VisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase )
UpperCAmelCase = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = VisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase )
UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase )
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase )
@is_pt_flax_cross_test
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = config_inputs_dict.pop('''vision_config''' )
UpperCAmelCase = config_inputs_dict.pop('''text_config''' )
UpperCAmelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.check_equivalence_flax_to_pt(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@slow
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.get_pretrained_model_and_inputs()
UpperCAmelCase = model_a(**lowerCAmelCase )
UpperCAmelCase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase )
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
UpperCAmelCase = model_a(**lowerCAmelCase )
UpperCAmelCase = after_outputs[0]
UpperCAmelCase = np.amax(np.abs(out_2 - out_1 ) )
self.assertLessEqual(lowerCAmelCase , 1E-5 )
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
def a__( self : str )-> Dict:
"""simple docstring"""
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase , text_from_pt=lowerCAmelCase , )
UpperCAmelCase = 13
UpperCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase = random_attention_mask([batch_size, 4] )
UpperCAmelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def a__( self : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxViTModel(lowerCAmelCase )
UpperCAmelCase = FlaxBertModel(lowerCAmelCase )
return vision_model, text_model
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = FlaxViTModelTester(self )
UpperCAmelCase = FlaxBertModelTester(self )
UpperCAmelCase = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = vision_config_and_inputs
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
def a__( self : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase , text_from_pt=lowerCAmelCase , )
UpperCAmelCase = 13
UpperCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase = random_attention_mask([batch_size, 4] )
UpperCAmelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def a__( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : int )-> Tuple:
"""simple docstring"""
UpperCAmelCase = FlaxCLIPVisionModel(lowerCAmelCase )
UpperCAmelCase = FlaxBertModel(lowerCAmelCase )
return vision_model, text_model
def a__( self : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase = FlaxCLIPVisionModelTester(self )
UpperCAmelCase = FlaxBertModelTester(self )
UpperCAmelCase = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = vision_config_and_inputs
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class UpperCamelCase__( unittest.TestCase ):
@slow
def a__( self : str )-> Dict:
"""simple docstring"""
UpperCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCAmelCase = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase , atol=1E-3 ) )
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__( lowerCAmelCase ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
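# Example CLI usage (a hedged illustration; the binary name and model id are
# placeholders, the flags match the parser defined above):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force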
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = 2
while True:
UpperCAmelCase = factor_map.pop(prime , None )
if factor:
UpperCAmelCase = factor + prime
while x in factor_map:
x += factor
UpperCAmelCase = factor
else:
UpperCAmelCase = prime
yield prime
prime += 1
def lowerCamelCase__ ( A : float = 1E10 ):
'''simple docstring'''
UpperCAmelCase = sieve()
UpperCAmelCase = 1
while True:
UpperCAmelCase = next(primes )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the remainder will be 2.
next(primes )
n += 2
if __name__ == "__main__":
print(solution())
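# Why the search can skip even n and stop once 2 * prime * n > limit (a brief note
# on the math this solves, stated as an assumption about the underlying Project
# Euler problem): expanding (p - 1)**n + (p + 1)**n modulo p**2 leaves 2 * n * p
# when n is odd and 2 when n is even, so the remainder grows as 2 * n * p_n and
# even indices never matter.
def _remainder(p: int, n: int) -> int:
    return ((p - 1) ** n + (p + 1) ** n) % (p * p)
# _remainder(7, 3) == 2 * 3 * 7 == 42  (odd n)
# _remainder(7, 4) == 2                (even n)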
'''simple docstring'''
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
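# Worked example of the base-26 expansion above (added illustration; the obfuscated
# name refers to the converter defined just above):
# "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28
assert lowerCamelCase__("AB") == 28
assert lowerCamelCase__("ZZ") == 702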
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
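# Why the bit trick works: a positive power of two has exactly one set bit;
# subtracting 1 clears that bit and sets every lower bit, so the AND is zero.
#   8 = 0b1000, 7 = 0b0111, 8 & 7 == 0        -> power of two
#   6 = 0b0110, 5 = 0b0101, 6 & 5 == 0b0100   -> not a power of two
# Note that 0 & (0 - 1) == 0 as well, so 0 passes the test under this convention.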
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
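# An aside on the pattern above: `jax.jit` traces `model_jitted` once and compiles
# it with XLA, so comparing against the `jax.disable_jit()` path checks that
# tracing does not change the shapes (and, implicitly, the semantics) of the
# outputs.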
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 50
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowercase : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__:
__magic_name__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__magic_name__ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__magic_name__ : Optional[str] = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
__magic_name__ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__magic_name__ : bool = field(default=lowerCAmelCase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__magic_name__ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase__:
__magic_name__ : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
__magic_name__ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
__magic_name__ : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : bool = field(
default=lowerCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
UpperCAmelCase = import_module('''tasks''' )
try:
UpperCAmelCase = getattr(A , model_args.task_type )
UpperCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
UpperCAmelCase = dict(enumerate(A ) )
UpperCAmelCase = len(A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]:
UpperCAmelCase = np.argmax(A , axis=2 )
UpperCAmelCase , UpperCAmelCase = preds.shape
UpperCAmelCase = [[] for _ in range(A )]
UpperCAmelCase = [[] for _ in range(A )]
for i in range(A ):
for j in range(A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
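# Illustrative example (hypothetical values): with label_map = {0: "O", 1: "B-LOC"},
# argmax predictions [[0, 1, 0]] and label_ids [[0, 1, -100]] (-100 being
# nn.CrossEntropyLoss().ignore_index), this returns ([["O", "B-LOC"]], [["O", "B-LOC"]]):
# padded positions are dropped from both lists.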
def compute_metrics(A : EvalPrediction ) -> Dict:
UpperCAmelCase , UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A , A ),
"precision": precision_score(A , A ),
"recall": recall_score(A , A ),
"f1": fa_score(A , A ),
}
# Data collator
UpperCAmelCase = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , A , A )
writer.write('''%s = %s\n''' % (key, value) )
results.update(A )
# Predict
if training_args.do_predict:
UpperCAmelCase = TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = trainer.predict(A )
UpperCAmelCase , UpperCAmelCase = align_predictions(A , A )
UpperCAmelCase = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(A , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , A , A )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
UpperCAmelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(A , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(A , A , A )
return results
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 50
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
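# Hypothetical example, assuming the enumerated iterable is FAIRSEQ_LANGUAGE_CODES:
# the first code, "ar_AR", would map to sp_model_size + 0 + fairseq_offset.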
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
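# Illustrative sketch: an out-of-vocabulary piece makes PieceToId return 0, so the
# method falls back to unk_token_id instead of returning 0 + fairseq_offset.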
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
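# Net effect: both the source and target sides follow the mBART-50 format
# [lang_code] X [eos] -- the language code is the prefix token, eos the suffix token.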
| 50
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__( unittest.TestCase ):
def a__( self : str , lowerCAmelCase : int , lowerCAmelCase : int )-> int:
"""simple docstring"""
UpperCAmelCase = jnp.ones((batch_size, length) ) / length
return scores
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = 20
UpperCAmelCase = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase )
# tweak scores to not be uniform anymore
UpperCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase = jax.nn.softmax(lowerCAmelCase , axis=-1 )
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase , scores.copy() , cur_len=lowerCAmelCase ) , axis=-1 )
UpperCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase , scores.copy() , cur_len=lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = 10
UpperCAmelCase = 2
# create ramp distribution
UpperCAmelCase = np.broadcast_to(np.arange(lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase = 5
UpperCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase = np.broadcast_to(np.arange(lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase = top_k_warp_safety_check(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# min_tokens_to_keep overrides top_k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a__( self : Dict )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = 10
UpperCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase = np.exp(top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase ) )
# dist should be filtered to keep the minimum number of values whose cumulative sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase = np.broadcast_to(np.arange(lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
UpperCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# the first batch should keep three tokens; the second would keep only 1, but `min_tokens_to_keep=2` forces it to keep 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 20
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase )
# check that min length is applied at length 5
UpperCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase = 5
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = min_dist_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = 15
UpperCAmelCase = min_dist_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertFalse(jnp.isinf(lowerCAmelCase ).any() )
def a__( self : List[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = 20
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase = 1
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase = 3
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertFalse(jnp.isinf(lowerCAmelCase ).any() )
def a__( self : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = 20
UpperCAmelCase = 4
UpperCAmelCase = 0
UpperCAmelCase = 5
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase = 4
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase = 3
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
self.assertFalse(jnp.isinf(lowerCAmelCase ).any() )
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 10
UpperCAmelCase = 15
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 15
# dummy input_ids and scores
UpperCAmelCase = ids_tensor((batch_size, sequence_length) , lowerCAmelCase )
UpperCAmelCase = input_ids.copy()
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scores.copy()
# instantiate all dist processors
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase )
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase )
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase )
UpperCAmelCase = 10
# no processor list
UpperCAmelCase = temp_dist_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = min_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = bos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = eos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# with processor list
UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase = processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a__( self : Dict )-> str:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 10
UpperCAmelCase = 15
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 15
# dummy input_ids and scores
UpperCAmelCase = ids_tensor((batch_size, sequence_length) , lowerCAmelCase )
UpperCAmelCase = input_ids.copy()
UpperCAmelCase = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scores.copy()
# instantiate all dist processors
UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase = FlaxTopKLogitsWarper(3 )
UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase )
UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase )
UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase )
UpperCAmelCase = 10
# no processor list
def run_no_processor_list(lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
UpperCAmelCase = temp_dist_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = min_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = bos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
UpperCAmelCase = eos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
return scores
# with processor list
def run_processor_list(lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : List[Any] ):
UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase = processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase )
return scores
UpperCAmelCase = jax.jit(lowerCAmelCase )
UpperCAmelCase = jax.jit(lowerCAmelCase )
UpperCAmelCase = jitted_run_no_processor_list(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = jitted_run_processor_list(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 50
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 1
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowercase : Optional[Any] = logging.get_logger("""transformers.models.speecht5""")
def lowerCamelCase__ ( A : List[Any] , A : int , A : Union[str, Any] ):
'''simple docstring'''
hf_model.apply_weight_norm()
UpperCAmelCase = checkpoint['''input_conv.weight_g''']
UpperCAmelCase = checkpoint['''input_conv.weight_v''']
UpperCAmelCase = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[f"""upsamples.{i}.1.weight_g"""]
UpperCAmelCase = checkpoint[f"""upsamples.{i}.1.weight_v"""]
UpperCAmelCase = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
UpperCAmelCase = checkpoint['''output_conv.1.weight_g''']
UpperCAmelCase = checkpoint['''output_conv.1.weight_v''']
UpperCAmelCase = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowerCamelCase__ ( A : str , A : Dict , A : int , A : Union[str, Any]=None , A : Optional[Any]=None , ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(A )
else:
UpperCAmelCase = SpeechTaHifiGanConfig()
UpperCAmelCase = SpeechTaHifiGan(A )
UpperCAmelCase = torch.load(A )
load_weights(orig_checkpoint['''model''']['''generator'''] , A , A )
UpperCAmelCase = np.load(A )
UpperCAmelCase = stats[0].reshape(-1 )
UpperCAmelCase = stats[1].reshape(-1 )
UpperCAmelCase = torch.from_numpy(A ).float()
UpperCAmelCase = torch.from_numpy(A ).float()
model.save_pretrained(A )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(A )
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_lowercase : Optional[Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
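# Example invocation (hypothetical script and path names):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan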
| 50
|
'''simple docstring'''
import functools
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase = set(A )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
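# Minimal usage sketch (hypothetical inputs): for days [1, 4, 6, 7, 8, 20] and pass
# costs [2, 7, 15] (1-day, 7-day, 30-day), the function returns 11 -- one 7-day pass
# covering days 1-7 plus single-day passes for days 8 and 20.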
| 50
| 1
|
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict )-> int:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
def __call__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
UpperCAmelCase = 1
UpperCAmelCase = self.unet(lowerCAmelCase , lowerCAmelCase ).sample
UpperCAmelCase = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
UpperCAmelCase = scheduler_output - scheduler_output + torch.ones_like(lowerCAmelCase )
return result
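# Usage sketch (assumed pre-built unet and scheduler): calling
# UpperCamelCase__(unet, scheduler)() returns a tensor of ones with the sample's shape,
# because the scheduler output cancels itself out on the line above.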
| 50
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 1
|
'''simple docstring'''
import functools
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase = set(A )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Any , A : int=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item
UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
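# Shape sketch: a fused qkv tensor with 3 * channels rows is regrouped per attention
# head, split into equal query / key / value chunks, and reshaped to `target_shape`.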
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 50
| 1
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( A : List[str] ):
'''simple docstring'''
UpperCAmelCase = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
UpperCAmelCase = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , A )
if matches:
UpperCAmelCase = float(matches[1] )
UpperCAmelCase = int(matches[2] )
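# e.g. "mobilenet_v1_1.0_224" parses to a depth multiplier of 1.0 and an image size
# of 224 via the two captured groups above.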
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
UpperCAmelCase = 10_01
UpperCAmelCase = '''imagenet-1k-id2label.json'''
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(A ) + 1: v for k, v in idalabel.items()}
UpperCAmelCase = '''background'''
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A : str , A : str , A : str , A : Union[str, Any]=False ):
'''simple docstring'''
UpperCAmelCase = get_mobilenet_va_config(A )
# Load 🤗 model
UpperCAmelCase = MobileNetVaForImageClassification(A ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(A , A , A )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
UpperCAmelCase = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
UpperCAmelCase = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
UpperCAmelCase = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
UpperCAmelCase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , A , atol=1E-4 )
Path(A ).mkdir(exist_ok=A )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A )
if push_to_hub:
print('''Pushing to the hub...''' )
UpperCAmelCase = '''google/''' + model_name
image_processor.push_to_hub(A )
model.push_to_hub(A )
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
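# Example invocation (hypothetical script and checkpoint names):
# python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./mobilenet_v1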
| 50
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
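# A minimal usage sketch for the configuration class above (obfuscated here as
# UpperCamelCase__; upstream it is exported as DPRConfig). The override value is
# illustrative:
#
#   from transformers import DPRConfig, DPRContextEncoder
#
#   config = DPRConfig(projection_dim=128)   # override a single default
#   encoder = DPRContextEncoder(config)      # randomly initialized encoder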
| 50
| 1
|
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCamelCase__:
def a__( self : Tuple , lowerCAmelCase : List[str] )-> str:
"""simple docstring"""
raise NotImplementedError()
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
raise NotImplementedError()
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Any , lowerCAmelCase : "AutoTokenizer" , lowerCAmelCase : bool = False , **lowerCAmelCase : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = tokenizer
UpperCAmelCase = skip_prompt
UpperCAmelCase = decode_kwargs
# variables used in the streaming process
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = True
def a__( self : Optional[int] , lowerCAmelCase : Any )-> int:
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
UpperCAmelCase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
UpperCAmelCase = text[self.print_len :]
UpperCAmelCase = []
UpperCAmelCase = 0
# If the last token is a CJK character, we print the characters.
elif len(lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase = text[self.print_len :]
self.print_len += len(lowerCAmelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(lowerCAmelCase )
self.on_finalized_text(lowerCAmelCase )
def a__( self : List[Any] )-> int:
"""simple docstring"""
if len(self.token_cache ) > 0:
UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
UpperCAmelCase = text[self.print_len :]
UpperCAmelCase = []
UpperCAmelCase = 0
else:
UpperCAmelCase = ''''''
UpperCAmelCase = True
self.on_finalized_text(lowerCAmelCase , stream_end=lowerCAmelCase )
def a__( self : Any , lowerCAmelCase : str , lowerCAmelCase : bool = False )-> Tuple:
"""simple docstring"""
print(lowerCAmelCase , flush=lowerCAmelCase , end='''''' if not stream_end else None )
def a__( self : List[str] , lowerCAmelCase : Optional[int] )-> Optional[int]:
"""simple docstring"""
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Optional[Any] , lowerCAmelCase : "AutoTokenizer" , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[float] = None , **lowerCAmelCase : List[str] )-> Tuple:
"""simple docstring"""
super().__init__(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = Queue()
UpperCAmelCase = None
UpperCAmelCase = timeout
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : bool = False )-> int:
"""simple docstring"""
self.text_queue.put(lowerCAmelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Tuple )-> Any:
"""simple docstring"""
return self
def a__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
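# A minimal usage sketch for the queue-backed iterator streamer above (upstream
# name: TextIteratorStreamer). The model id and prompt are illustrative:
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("Once upon a time", return_tensors="pt")
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   # generate() runs in a worker thread and feeds the streamer token by token
#   thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
#   thread.start()
#   for chunk in streamer:        # yields decoded text as it is produced
#       print(chunk, end="")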
| 50
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=A , required=A , help='''The binarized data file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature applied to the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of passes over the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
            '''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
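# A minimal launch sketch for the distillation entry point above; all file paths
# are illustrative placeholders. With --mlm, the sanity checks require
# alpha_mlm > 0 and alpha_clm == 0, plus a token-counts file:
#
#   python train.py \
#       --student_type distilbert --student_config distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --data_file data/binarized_text.pickle \
#       --token_counts data/token_counts.pickle \
#       --dump_path serialization_dir/my_first_distillation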
| 50
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowercase : str = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "ernie_m"
__magic_name__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Any , lowerCAmelCase : int = 250002 , lowerCAmelCase : int = 768 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 12 , lowerCAmelCase : int = 3072 , lowerCAmelCase : str = "gelu" , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : int = 514 , lowerCAmelCase : float = 0.02 , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 1E-05 , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[Any]=False , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : List[str] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = classifier_dropout
UpperCAmelCase = is_decoder
UpperCAmelCase = act_dropout
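# The attribute_map declared above aliases legacy names to the canonical ones; a
# small sketch of the observable behavior (the upstream class name ErnieMConfig
# is assumed):
#
#   from transformers import ErnieMConfig
#
#   config = ErnieMConfig(classifier_dropout=0.2)
#   assert config.dropout == 0.2   # "dropout" resolves to classifier_dropout via attribute_map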
| 50
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
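# A minimal call sketch for the processor above (this matches the upstream
# CLIPSegProcessor; the checkpoint id and inputs are illustrative):
#
#   from PIL import Image
#   from transformers import CLIPSegProcessor
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   image = Image.new("RGB", (352, 352))
#   encoding = processor(text=["a cat"], images=image, return_tensors="pt")
#   # encoding holds input_ids/attention_mask from the tokenizer plus pixel_values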
| 50
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Any = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_lowercase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
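# With the _LazyModule wiring above, heavy submodules are resolved only on first
# attribute access; a small sketch of the observable behavior:
#
#   from transformers import XCLIPConfig   # cheap: config-only import
#   from transformers import XCLIPModel    # triggers the torch-backed submodule
#
#   model = XCLIPModel(XCLIPConfig())      # randomly initialized X-CLIP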
| 50
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=A , default='''logs''' , help='''Where to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
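# A minimal launch sketch for the tracking example above (the script file name is
# an illustrative placeholder; the flags mirror the argparse options in main()):
#
#   accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking --project_dir ./logs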
| 50
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__magic_name__ : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
__magic_name__ : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
__magic_name__ : ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
__magic_name__ : str = "question"
__magic_name__ : str = "context"
__magic_name__ : str = "answers"
@property
def a__( self : Optional[int] )-> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
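# A small usage sketch for the task template above (upstream it is exported as
# datasets.tasks.QuestionAnsweringExtractive; note that prepare_for_task is only
# available in older versions of the datasets library):
#
#   from datasets import load_dataset
#   from datasets.tasks import QuestionAnsweringExtractive
#
#   task = QuestionAnsweringExtractive(question_column="question", context_column="context")
#   squad = load_dataset("squad", split="train")
#   squad = squad.prepare_for_task(task)   # casts/renames columns to the schema above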
| 50
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 1
|
'''simple docstring'''
def lowerCamelCase__ ( sentence : str , ngram_size : int ):
    '''simple docstring'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
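# An illustrative call for the n-gram slicer above: it yields every contiguous
# character slice of the requested size, e.g.
#
#   lowerCamelCase__("abcde", 3)   # -> ['abc', 'bcd', 'cde']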
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
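# A worked sketch of the derived properties above, using the facebook/encodec_24khz
# defaults (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2], last target
# bandwidth 24.0 kbps):
#
#   hop_length     = 8 * 5 * 4 * 2 = 320 samples per frame
#   frame_rate     = ceil(24000 / 320) = 75 frames per second
#   num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 codebooks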
| 50
| 1
|