"""Utilities to extract compressed archives into the `datasets` extraction cache."""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out tar members whose names or link targets would escape the
        # output directory (protection against tar path traversal).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
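
# A minimal usage sketch of the Extractor API above (the archive path is
# hypothetical):
#
#     >>> fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "tar"
#     >>> if fmt:
#     ...     Extractor.extract("archive.tar.gz", "extracted_dir", extractor_format=fmt)
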
"""Tokenization tests for the CLIP slow and fast tokenizers."""
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer
        # saved with a version older than the v4.17.0 breaking change.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
"""Fast tokenizer class for T5."""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
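
# A minimal usage sketch of the sentinel-token helpers above; with the default
# extra_ids=100 the tokenizer registers <extra_id_0> ... <extra_id_99> as
# additional special tokens (get_sentinel_tokens() returns them in no
# particular order, since it filters through a set):
#
#     >>> tok = T5TokenizerFast.from_pretrained("t5-small")
#     >>> len(tok.get_sentinel_tokens())
#     100
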
"""Fine-tune a pretrained model for audio classification."""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
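
# A typical invocation, sketched from the argument definitions above; the
# dataset and hyperparameter choices are illustrative only:
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb \
#       --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 \
#       --max_length_seconds 1 \
#       --num_train_epochs 5
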
"""Tests for the Kandinsky image-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""
Decompression of files compressed with a Lempel-Ziv-Welch (LZW) variant.
https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
"""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the LZW algorithm and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the lexicon size reaches a power of two, every code gets one bit
        # longer, so re-key the existing entries with a leading "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only 0's and 1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # The last element is the stop-bit padding added above; it is not
            # part of the decompressed payload, so it is not written back.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that a compressed file carries and return the payload."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress_file(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress_file(sys.argv[1], sys.argv[2])
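
# Usage sketch (file names are hypothetical): this script is the counterpart of
# an LZW compressor that prepends the size prefix stripped by remove_prefix():
#
#   python lempel_ziv_decompress.py compressed.bin restored.bin
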
"""
Project Euler problem 25: https://projecteuler.net/problem=25
Find the index of the first term in the Fibonacci sequence to contain n digits.
"""


def fibonacci(n: int) -> int:
    """Compute the n-th Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
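
# Worked example: the first Fibonacci number with 3 digits is F(12) = 144
# (0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144), so:
#
#     >>> solution(3)
#     12
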
"""Unconditional latent diffusion pipeline (VQ-VAE decoder + UNet + DDIM scheduler)."""
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
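
# A minimal usage sketch; the checkpoint name is illustrative and assumes a
# repo hosting an unconditional latent-diffusion model with vqvae, unet and
# scheduler components:
#
#     >>> from diffusers import LDMPipeline
#     >>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     >>> image = pipe(batch_size=1, num_inference_steps=50).images[0]
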
"""Dijkstra's two-stack algorithm for evaluating fully parenthesized infix expressions."""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression using two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
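
# Worked trace for the example above: the innermost parentheses close first,
# so (4 * 2) -> 8 and (2 + 3) -> 5 are reduced onto the operand stack, then
# (8 * 5) -> 40, and finally (5 + 40) -> 45.
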
"""Report GitHub Actions self-hosted runners that are offline."""
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
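
# Example invocation (the script file name is whatever this file is saved as;
# the token needs actions:read permission on the repository):
#
#   python get_offline_runners.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>
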
"""Tests for the PNDM diffusion pipeline."""
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
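

# Usage sketch (an illustration only; `ds1` and `ds2` stand for map-style
# Dataset objects that are not defined here):
#     mixed = interleave_datasets([ds1, ds2], probabilities=[0.8, 0.2], seed=42)
#     combined = concatenate_datasets([ds1, ds2])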
| 31
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        """simple docstring"""
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        """simple docstring"""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 31
|
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    '''simple docstring'''
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
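    # Quick sanity check (greedy two-way merge): merging sizes [2, 3, 4]
    # costs 5 for (2 + 3) and then 9 for (5 + 4), i.e. 14 in total.
    print(optimal_merge_pattern([2, 3, 4]))  # 14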
| 31
| 1
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    '''simple docstring'''
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
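

if __name__ == "__main__":
    # Minimal manual runner; a test collector such as pytest would normally
    # pick this function up automatically.
    test_prim_successful_result()
    print("Prim's MST test passed.")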
| 31
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
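
# The first call above prints all 24 permutations of [3, 1, 2, 4] in
# depth-first order, beginning with [3, 1, 2, 4] itself; the second prints
# the 6 permutations of ["A", "B", "C"].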
| 31
| 1
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    '''simple docstring'''
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
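    # Quick sanity check: the gamma function satisfies gamma(n) = (n - 1)!,
    # so gamma(5) should be close to 24 (up to quadrature error).
    print(gamma(5))  # ~24.0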
| 31
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(node: TreeNode | None) -> bool:
    '''simple docstring'''

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
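    # Quick sanity check: a three-node tree rooted at 2.0 with children 1.0
    # and 3.0 is a valid binary search tree.
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))))  # True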
| 31
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ) -> Union[str, Any]:
    '''simple docstring'''
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path , map_location="cpu" )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def convert_pytorch_sharded_state_dict_to_flax( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def load_flax_checkpoint_in_pytorch_model( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def load_flax_weights_in_pytorch_model( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _UpperCamelCase ) ).values()
    if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
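

# Note: in the upstream transformers library these helpers back
# `from_pretrained(..., from_pt=True)` on Flax models and
# `from_pretrained(..., from_flax=True)` on PyTorch models.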
| 31
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    '''simple docstring'''
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
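

# Worked example of the Luhn doubling rule above, using "4111111111111111":
# every second digit from the right is doubled (each 1 becomes 2, the leading
# 4 becomes 8), giving 8 + 7 * 2 = 22 from the doubled digits plus 8 from the
# untouched ones; the checksum is 30 and 30 % 10 == 0, so the check passes.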
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 1
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    '''simple docstring'''
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
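    # Project Euler 58 sanity run: prints the first spiral side length whose
    # diagonal prime ratio falls below the default 10% threshold.
    print(f"solution() = {solution()}")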
| 31
|
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
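    # Quick sanity check: with n * p = n_i**2, the missing electron
    # concentration is recovered from the other two values.
    print(carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=50))
    # -> ('electron_conc', 25.0)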
| 31
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    '''simple docstring'''
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
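    # Quick sanity check: the longest increasing subsequence of
    # [2, 5, 3, 7, 11, 8, 10, 13, 6] is [2, 3, 7, 8, 10, 13], length 6.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6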
| 31
| 1
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        """simple docstring"""
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """simple docstring"""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """simple docstring"""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps
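

# Minimal usage sketch (an illustration, not an official example; assumes
# `model_output` comes from a diffusion model such as a UNet and `sample`
# starts as Gaussian noise):
#     scheduler = IPNDMScheduler(num_train_timesteps=50)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         sample = scheduler.step(model_output, t, sample).prev_sample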
| 31
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject):
    _backends = ["torch", "scipy"]
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
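

# Note: instantiating this placeholder (or calling either classmethod) raises
# an ImportError via `requires_backends` whenever torch or scipy is not installed.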
| 31
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 31
| 1
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self : int, a_ : Optional[Any], a_ : Optional[Any], a_ : str, a_ : Any, a_ : List[Any], a_ : Optional[int], a_ : Optional[int], a_ : List[Any], a_ : Any, ):
"""simple docstring"""
UpperCamelCase__ = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_, attention_mask=a_, token_type_ids=a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : List[Any], a_ : Tuple, a_ : Any, a_ : int, a_ : Union[str, Any], a_ : List[str], *a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
UpperCamelCase__ = torch.ones(input_ids.shape, dtype=torch.long, device=a_ )
UpperCamelCase__ = self.seq_length // 2
UpperCamelCase__ = 0
# first forward pass
UpperCamelCase__ , UpperCamelCase__ = model(a_, attention_mask=a_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 1), config.vocab_size )
# change a random masked slice from input_ids
UpperCamelCase__ = ids_tensor((1,), a_ ).item() + 1
UpperCamelCase__ = ids_tensor((self.batch_size, 1), config.vocab_size ).squeeze(-1 )
UpperCamelCase__ = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCamelCase__ = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCamelCase__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=a_ )], dim=1, )
# get two different outputs
UpperCamelCase__ = model(a_, attention_mask=a_ )["last_hidden_state"]
UpperCamelCase__ = model(a_, past_key_values=a_, attention_mask=a_ )["last_hidden_state"]
# select random slice
UpperCamelCase__ = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : Any, a_ : Dict, a_ : Any, a_ : Any, a_ : int, a_ : Optional[Any], *a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = BioGptModel(config=a_ ).to(a_ ).eval()
UpperCamelCase__ = torch.ones(input_ids.shape, dtype=torch.long, device=a_ )
# first forward pass
UpperCamelCase__ = model(a_, attention_mask=a_, use_cache=a_ )
UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3), config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3), 2 )
# append to next input_ids and
UpperCamelCase__ = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask], dim=-1 )
UpperCamelCase__ = model(a_, attention_mask=a_ )["last_hidden_state"]
UpperCamelCase__ = model(a_, attention_mask=a_, past_key_values=a_ )[
"last_hidden_state"
]
# select random slice
UpperCamelCase__ = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : Optional[int], a_ : Optional[Any], a_ : int, a_ : Optional[int], a_ : str, a_ : List[str], *a_ : str, a_ : Tuple=False ):
"""simple docstring"""
UpperCamelCase__ = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCamelCase__ = model(a_, labels=a_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowercase_ ( self : Any, a_ : List[Any], *a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = BioGptModel(a_ )
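# GPT-2-style scaled initialization: residual projection ("c_proj") weights
# are expected to have std = initializer_range / sqrt(2 * num_hidden_layers)
# and near-zero mean, which the loop below checks against the state dict.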
UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ), 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ), 0.01 )
def lowercase_ ( self : Tuple, a_ : int, a_ : List[str], a_ : Tuple, a_ : Dict, a_ : Tuple, *a_ : int ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_, attention_mask=a_, token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
(
UpperCamelCase__,
UpperCamelCase__,
UpperCamelCase__,
UpperCamelCase__,
UpperCamelCase__,
UpperCamelCase__,
UpperCamelCase__,
) = config_and_inputs
UpperCamelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Any = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCamelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCamelCase : int = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Any = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = BioGptModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=a_, hidden_size=37 )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ = type
self.model_tester.create_and_check_model(*a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_, gradient_checkpointing=a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(a_ )
UpperCamelCase__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCamelCase__ = "left"
# Define PAD token = EOS token
UpperCamelCase__ = tokenizer.eos_token
UpperCamelCase__ = model.config.eos_token_id
# use different length sentences to test batching
UpperCamelCase__ = [
"Hello, my dog is a little",
"Today, I",
]
UpperCamelCase__ = tokenizer(a_, return_tensors="pt", padding=a_ )
UpperCamelCase__ = inputs["input_ids"].to(a_ )
UpperCamelCase__ = model.generate(
input_ids=a_, attention_mask=inputs["attention_mask"].to(a_ ), )
UpperCamelCase__ = tokenizer(sentences[0], return_tensors="pt" ).input_ids.to(a_ )
UpperCamelCase__ = model.generate(input_ids=a_ )
UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
UpperCamelCase__ = tokenizer(sentences[1], return_tensors="pt" ).input_ids.to(a_ )
UpperCamelCase__ = model.generate(input_ids=a_, max_length=model.config.max_length - num_paddings )
UpperCamelCase__ = tokenizer.batch_decode(a_, skip_special_tokens=a_ )
UpperCamelCase__ = tokenizer.decode(output_non_padded[0], skip_special_tokens=a_ )
UpperCamelCase__ = tokenizer.decode(output_padded[0], skip_special_tokens=a_ )
UpperCamelCase__ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, [non_padded_sentence, padded_sentence] )
@slow
def lowercase_ ( self : List[str] ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = 3
UpperCamelCase__ = input_dict["input_ids"]
UpperCamelCase__ = input_ids.ne(1 ).to(a_ )
UpperCamelCase__ = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
UpperCamelCase__ = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_, attention_mask=a_, labels=a_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = 3
UpperCamelCase__ = "multi_label_classification"
UpperCamelCase__ = input_dict["input_ids"]
UpperCamelCase__ = input_ids.ne(1 ).to(a_ )
UpperCamelCase__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase__ = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_, attention_mask=a_, labels=a_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase ( unittest.TestCase):
@slow
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
UpperCamelCase__ = torch.tensor([[2, 4805, 9, 656, 21]] )
UpperCamelCase__ = model(a_ )[0]
UpperCamelCase__ = 4_2384
UpperCamelCase__ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape, a_ )
UpperCamelCase__ = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], a_, atol=1e-4 ) )
@slow
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCamelCase__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(a_ )
torch.manual_seed(0 )
UpperCamelCase__ = tokenizer("COVID-19 is", return_tensors="pt" ).to(a_ )
UpperCamelCase__ = model.generate(
**a_, min_length=100, max_length=1024, num_beams=5, early_stopping=a_, )
UpperCamelCase__ = tokenizer.decode(output_ids[0], skip_special_tokens=a_ )
UpperCamelCase__ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(a_, a_ )
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
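# Hedged usage sketch (the material constants are illustrative assumptions,
# not part of the original file): for water with density ~= 998 kg/m^3 and
# bulk_modulus ~= 2.15e9 Pa, v = (bulk_modulus / density) ** 0.5 gives
# roughly 1468 m/s:
# >>> round((2.15e9 / 998) ** 0.5)
# 1468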
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
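# Hedged worked example (values are illustrative): exactly one of
# (stress, tangential_force, area) must be 0 and the function solves for it,
# so with stress = 25 Pa, tangential_force = 100 N and area = 0 it returns
# ("area", tangential_force / stress) == ("area", 4.0).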
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
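# Worked example (inputs assumed for illustration): with nth_term = 5 and
# power = 2 the loop above builds ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"],
# i.e. the partial P-series 1 + 1/2^2 + 1/3^2 + 1/4^2 + 1/5^2.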
'''simple docstring'''
import os
import sys
import unittest
__lowercase: Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase: str = os.path.join(git_repo_path, "src", "transformers")
__lowercase: Dict = "\n{0} = None\n"
__lowercase: Optional[int] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__lowercase: Optional[int] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
self.assertIsNone(a_ )
UpperCamelCase__ = find_backend(" if not is_tokenizers_available():" )
self.assertEqual(a_, "tokenizers" )
UpperCamelCase__ = find_backend(" if not is_tensorflow_text_available():" )
self.assertEqual(a_, "tensorflow_text" )
UpperCamelCase__ = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
self.assertEqual(a_, "sentencepiece_and_tokenizers" )
UpperCamelCase__ = find_backend(
" if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
self.assertEqual(a_, "sentencepiece_and_tensorflow_text" )
UpperCamelCase__ = find_backend(
" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
self.assertEqual(a_, "sentencepiece_and_tokenizers_and_vision" )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch", a_ )
self.assertIn("tensorflow_text", a_ )
self.assertIn("sentencepiece_and_tokenizers", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("BertModel", objects["torch"] )
self.assertIn("TFBertModel", objects["tf"] )
self.assertIn("FlaxBertModel", objects["flax"] )
self.assertIn("BertModel", objects["torch"] )
self.assertIn("TFBertTokenizer", objects["tensorflow_text"] )
self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = create_dummy_object("CONSTANT", "'torch'" )
self.assertEqual(a_, "\nCONSTANT = None\n" )
UpperCamelCase__ = create_dummy_object("function", "'torch'" )
self.assertEqual(
a_, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
UpperCamelCase__ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
UpperCamelCase__ = create_dummy_object("FakeClass", "'torch'" )
self.assertEqual(a_, a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
UpperCamelCase__ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"], a_ )
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
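# Hedged usage note (column names are the template defaults, not from a real
# dataset): aligning this template with a Features object whose "labels"
# column is a ClassLabel copies that ClassLabel into label_schema, so the
# returned template carries the dataset's actual class names.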
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = 'encodec'
def __init__( self : int, a_ : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0], a_ : Optional[int]=2_4000, a_ : List[Any]=1, a_ : Optional[Any]=False, a_ : Optional[Any]=None, a_ : Optional[int]=None, a_ : List[Any]=128, a_ : Any=32, a_ : str=1, a_ : Optional[int]=[8, 5, 4, 2], a_ : Tuple="weight_norm", a_ : Dict=7, a_ : Union[str, Any]=7, a_ : Tuple=3, a_ : List[str]=2, a_ : Any=True, a_ : str="reflect", a_ : Optional[Any]=2, a_ : Optional[Any]=2, a_ : Union[str, Any]=1.0, a_ : Optional[Any]=1024, a_ : Optional[int]=None, a_ : str=True, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = target_bandwidths
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = audio_channels
UpperCamelCase__ = normalize
UpperCamelCase__ = chunk_length_s
UpperCamelCase__ = overlap
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_filters
UpperCamelCase__ = num_residual_layers
UpperCamelCase__ = upsampling_ratios
UpperCamelCase__ = norm_type
UpperCamelCase__ = kernel_size
UpperCamelCase__ = last_kernel_size
UpperCamelCase__ = residual_kernel_size
UpperCamelCase__ = dilation_growth_rate
UpperCamelCase__ = use_causal_conv
UpperCamelCase__ = pad_mode
UpperCamelCase__ = compress
UpperCamelCase__ = num_lstm_layers
UpperCamelCase__ = trim_right_ratio
UpperCamelCase__ = codebook_size
UpperCamelCase__ = codebook_dim if codebook_dim is not None else hidden_size
UpperCamelCase__ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
super().__init__(**a_ )
@property
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
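# Hedged worked example (numbers assumed from the 24 kHz defaults above):
# with sampling_rate = 24000 and upsampling_ratios = [8, 5, 4, 2], the hop
# length is prod([8, 5, 4, 2]) = 320, so frame_rate = ceil(24000 / 320) = 75
# frames per second, and with target_bandwidths[-1] = 24.0 the quantizer
# count is int(1000 * 24.0 // (75 * 10)) = 32.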
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
# fmt: off
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase: int = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = ['input_features', 'is_longer']
def __init__( self : Optional[int], a_ : Optional[int]=64, a_ : List[str]=4_8000, a_ : List[str]=480, a_ : int=10, a_ : Tuple=1024, a_ : Tuple=0.0, a_ : List[Any]=False, a_ : float = 0, a_ : float = 1_4000, a_ : int = None, a_ : str = "fusion", a_ : str = "repeatpad", **a_ : int, ):
"""simple docstring"""
super().__init__(
feature_size=a_, sampling_rate=a_, padding_value=a_, return_attention_mask=a_, **a_, )
UpperCamelCase__ = top_db
UpperCamelCase__ = truncation
UpperCamelCase__ = padding
UpperCamelCase__ = fft_window_size
UpperCamelCase__ = (fft_window_size >> 1) + 1
UpperCamelCase__ = hop_length
UpperCamelCase__ = max_length_s
UpperCamelCase__ = max_length_s * sampling_rate
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = frequency_min
UpperCamelCase__ = frequency_max
UpperCamelCase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=a_, min_frequency=a_, max_frequency=a_, sampling_rate=a_, norm=a_, mel_scale="htk", )
UpperCamelCase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=a_, min_frequency=a_, max_frequency=a_, sampling_rate=a_, norm="slaney", mel_scale="slaney", )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase_ ( self : List[Any], a_ : np.array, a_ : Optional[np.array] = None ):
"""simple docstring"""
UpperCamelCase__ = spectrogram(
a_, window_function(self.fft_window_size, "hann" ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=a_, log_mel="dB", )
return log_mel_spectrogram.T
def lowercase_ ( self : Any, a_ : Dict, a_ : List[str], a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase__ = [0]
# randomly choose index for each part
UpperCamelCase__ = np.random.choice(ranges[0] )
UpperCamelCase__ = np.random.choice(ranges[1] )
UpperCamelCase__ = np.random.choice(ranges[2] )
UpperCamelCase__ = mel[idx_front : idx_front + chunk_frames, :]
UpperCamelCase__ = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCamelCase__ = mel[idx_back : idx_back + chunk_frames, :]
UpperCamelCase__ = torch.tensor(mel[None, None, :] )
UpperCamelCase__ = torch.nn.functional.interpolate(
a_, size=[chunk_frames, 64], mode="bilinear", align_corners=a_ )
UpperCamelCase__ = mel_shrink[0][0].numpy()
UpperCamelCase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase_ ( self : List[str], a_ : np.array, a_ : List[Any], a_ : int, a_ : List[str] ):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCamelCase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCamelCase__ = len(a_ ) - max_length
UpperCamelCase__ = np.random.randint(0, overflow + 1 )
UpperCamelCase__ = waveform[idx : idx + max_length]
UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters )
UpperCamelCase__ = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
UpperCamelCase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCamelCase__ = np.stack([mel, mel, mel, mel], axis=0 )
UpperCamelCase__ = False
else:
UpperCamelCase__ = self._random_mel_fusion(a_, a_, a_ )
UpperCamelCase__ = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
UpperCamelCase__ = False
# only use "repeat" as a new possible value for padding; the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCamelCase__ = int(max_length / len(a_ ) )
UpperCamelCase__ = np.stack(np.tile(a_, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
UpperCamelCase__ = int(max_length / len(a_ ) )
UpperCamelCase__ = np.stack(np.tile(a_, a_ ) )
UpperCamelCase__ = np.pad(a_, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0 )
if truncation == "fusion":
UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters )
UpperCamelCase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Tuple, a_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], a_ : str = None, a_ : Optional[str] = None, a_ : Optional[int] = None, a_ : Optional[int] = None, a_ : Optional[Union[str, TensorType]] = None, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = truncation if truncation is not None else self.truncation
UpperCamelCase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase__ = isinstance(a_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
UpperCamelCase__ = is_batched_numpy or (
isinstance(a_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase__ = [np.asarray(a_, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a_, np.ndarray ):
UpperCamelCase__ = np.asarray(a_, dtype=np.floataa )
elif isinstance(a_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase__ = [np.asarray(a_ )]
# convert to mel spectrogram, truncate and pad if needed.
UpperCamelCase__ = [
self._get_input_mel(a_, max_length if max_length else self.nb_max_samples, a_, a_ )
for waveform in raw_speech
]
UpperCamelCase__ = []
UpperCamelCase__ = []
for mel, longer in padded_inputs:
input_mel.append(a_ )
is_longer.append(a_ )
if truncation == "fusion" and sum(a_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCamelCase__ = np.random.randint(0, len(a_ ) )
UpperCamelCase__ = True
if isinstance(input_mel[0], a_ ):
UpperCamelCase__ = [np.asarray(a_, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
UpperCamelCase__ = [[longer] for longer in is_longer]
UpperCamelCase__ = {"input_features": input_mel, "is_longer": is_longer}
UpperCamelCase__ = BatchFeature(a_ )
if return_tensors is not None:
UpperCamelCase__ = input_features.convert_to_tensors(a_ )
return input_features
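# Hedged summary of the flow above: with truncation == "fusion", a too-long
# waveform becomes a (4, chunk_frames, 64) mel stack from _random_mel_fusion
# (one globally shrunk view plus front/middle/back crops), while "rand_trunc"
# takes a single random crop; the is_longer flags record which inputs actually
# exceeded max_length.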
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
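# Hedged usage sketch (the tiny dataset is an assumption for illustration):
# features are stored one sample per column, so projecting a 2-feature,
# 5-sample matrix onto 1 principal component yields a (1, 5) result:
# >>> import numpy as np
# >>> feats = np.array([[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 4.0, 6.0, 8.0, 10.0]])
# >>> principal_component_analysis(feats, 1).shape
# (1, 5)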
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Callable[[int | float], int | float] , _UpperCamelCase : int | float , _UpperCamelCase : int | float , _UpperCamelCase : int = 1_00 , ) -> float:
'''simple docstring'''
UpperCamelCase__ = x_start
UpperCamelCase__ = fnc(_UpperCamelCase )
UpperCamelCase__ = 0.0
for _ in range(_UpperCamelCase ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCamelCase__ = (x_end - x_start) / steps + xa
UpperCamelCase__ = fnc(_UpperCamelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCamelCase__ = xa
UpperCamelCase__ = fxa
return length
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
__lowercase: Tuple = 10
while i <= 100_000:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
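# Quick sanity check (illustrative): for f(x) = x from 0 to 1 the curve is a
# straight segment of length sqrt(2) ~= 1.41421, and the piecewise-linear
# approximation above is exact for it at any step count:
# >>> round(line_length(lambda x: x, 0, 1, 10), 5)
# 1.41421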
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
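# Hedged trace (the bit string is an illustrative assumption): the prefix
# remover counts leading zeros up to the first "1" and keeps only the bits
# after that "1", so an input of "0001101" drops "0001" and "101" is what
# reaches the decompressor.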
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
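# Offset arithmetic exercised above, made concrete (worked example, not a test):
# for the input "hello hello" the first token spans (0, 5) and the second (6, 11);
# with a leading space the spans shift by one to (1, 6) and (7, 12).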
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
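# Hand-traced BPE walkthrough of the fixture above (illustration only): with
# merges ["l o", "lo w</w>", "e r</w>"], the word "lower" starts as
# ['l', 'o', 'w', 'e', 'r</w>']; "l o" -> 'lo' and "e r</w>" -> 'er</w>' apply,
# while "lo w</w>" does not (this 'w' is word-internal, not 'w</w>'), giving
# ['lo', 'w', 'er</w>']. "newer" only admits "e r</w>", giving
# ['n', 'e', 'w', 'er</w>']. Together that is exactly the token list asserted
# in the vocab test above.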
| 31
| 1
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Tuple = FlaxAutoencoderKL
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.uniform(a_, ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
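# Standalone restatement of the dummy-input recipe above (shapes taken from the
# property; nothing model-specific is assumed): a (4, 3, 32, 32) batch sampled
# uniformly under a fixed PRNG key.
import jax

prng_key = jax.random.PRNGKey(0)
sample = jax.random.uniform(prng_key, (4, 3, 32, 32))
assert sample.shape == (4, 3, 32, 32)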
| 31
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
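# Worked example of the random-subsample logic above (the helper is called
# `random_subsample` upstream; the dump renamed it). Self-contained sketch:
import numpy as np
from random import randint

sample_rate, max_length = 16000, 1.0
wav = np.zeros(3 * sample_rate, dtype=np.float32)  # a 3 s clip
sample_length = int(round(sample_rate * max_length))
offset = randint(0, len(wav) - sample_length - 1)
clip = wav[offset : offset + sample_length]
assert clip.shape == (sample_length,)  # always exactly 1 s after cropping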
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
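# Illustrative invocation (dataset/model/output names are examples only; the
# flags themselves are defined by the dataclasses above):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ks \
#       --do_train --do_eval \
#       --max_length_seconds 1 \
#       --freeze_feature_encoder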
| 31
| 1
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowercase: int = "src/diffusers"
__lowercase: List[str] = "."
# This is to make sure the diffusers module imported is the one in the repo.
__lowercase: str = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
__lowercase: List[Any] = spec.loader.load_module()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return line.startswith(_UpperCamelCase ) or len(_UpperCamelCase ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , _UpperCamelCase ) is not None
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Any:
'''simple docstring'''
UpperCamelCase__ = object_name.split("." )
UpperCamelCase__ = 0
# First let's find the module where our object lives.
UpperCamelCase__ = parts[i]
while i < len(_UpperCamelCase ) and not os.path.isfile(os.path.join(_UpperCamelCase , F'{module}.py' ) ):
i += 1
if i < len(_UpperCamelCase ):
UpperCamelCase__ = os.path.join(_UpperCamelCase , parts[i] )
if i >= len(_UpperCamelCase ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(_UpperCamelCase , F'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ = f.readlines()
# Now let's find the class / func in the code!
UpperCamelCase__ = ""
UpperCamelCase__ = 0
for name in parts[i + 1 :]:
while (
line_index < len(_UpperCamelCase ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_UpperCamelCase ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCamelCase__ = line_index
while line_index < len(_UpperCamelCase ) and _should_continue(lines[line_index] , _UpperCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCamelCase__ = lines[start_index:line_index]
return "".join(_UpperCamelCase )
__lowercase: Tuple = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
__lowercase: Any = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
__lowercase: List[str] = re.compile(r"<FILL\s+[^>]*>")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = code.split("\n" )
UpperCamelCase__ = 0
while idx < len(_UpperCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_UpperCamelCase ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = len(get_indent(_UpperCamelCase ) ) > 0
if has_indent:
UpperCamelCase__ = F'class Bla:\n{code}'
UpperCamelCase__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=_UpperCamelCase )
UpperCamelCase__ = black.format_str(_UpperCamelCase , mode=_UpperCamelCase )
UpperCamelCase__ , UpperCamelCase__ = style_docstrings_in_code(_UpperCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
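# The dummy-class trick above, shown standalone: black only accepts
# syntactically complete modules, so an indented body is wrapped in a throwaway
# class first and the wrapper stripped from the formatted result. Minimal sketch:
code = "    def f(x):\n        return x + 1\n"
wrapped = f"class Bla:\n{code}"     # now parseable as a module
formatted = wrapped                 # stand-in for black.format_str(wrapped, mode=...)
unwrapped = formatted[len("class Bla:\n"):]
assert unwrapped == code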
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : int=False ) -> Any:
'''simple docstring'''
with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = []
UpperCamelCase__ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_UpperCamelCase ):
UpperCamelCase__ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = search.groups()
UpperCamelCase__ = find_code_in_diffusers(_UpperCamelCase )
UpperCamelCase__ = get_indent(_UpperCamelCase )
UpperCamelCase__ = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCamelCase__ = theoretical_indent
UpperCamelCase__ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCamelCase__ = True
while line_index < len(_UpperCamelCase ) and should_continue:
line_index += 1
if line_index >= len(_UpperCamelCase ):
break
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _should_continue(_UpperCamelCase , _UpperCamelCase ) and re.search(F'^{indent}# End copy' , _UpperCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCamelCase__ = lines[start_index:line_index]
UpperCamelCase__ = "".join(_UpperCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCamelCase__ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(_UpperCamelCase ) is None]
UpperCamelCase__ = "\n".join(_UpperCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = replace_pattern.replace("with" , "" ).split("," )
UpperCamelCase__ = [_re_replace_pattern.search(_UpperCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = pattern.groups()
UpperCamelCase__ = re.sub(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if option.strip() == "all-casing":
UpperCamelCase__ = re.sub(obja.lower() , obja.lower() , _UpperCamelCase )
UpperCamelCase__ = re.sub(obja.upper() , obja.upper() , _UpperCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCamelCase__ = blackify(lines[start_index - 1] + theoretical_code )
UpperCamelCase__ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCamelCase__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCamelCase__ = start_index + 1
if overwrite and len(_UpperCamelCase ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_UpperCamelCase )
return diffs
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : bool = False ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = glob.glob(os.path.join(_UpperCamelCase , "**/*.py" ) , recursive=_UpperCamelCase )
UpperCamelCase__ = []
for filename in all_files:
UpperCamelCase__ = is_copy_consistent(_UpperCamelCase , _UpperCamelCase )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join(_UpperCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
__lowercase: Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowercase: List[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 31
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
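# De-obfuscated sketch of the decoder loop above (the renaming pass collapsed
# every left-hand side into the same placeholder; names follow the original
# TheAlgorithms LZW source, which this file appears to be):
import math

def decompress_data_sketch(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for bit in data_bits:
        curr_string += bit
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # double the code width: prefix every existing key with "0"
            lexicon = {"0" + key: value for key, value in lexicon.items()}
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result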
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any]=False ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=False ) -> int:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ = ""
else:
UpperCamelCase__ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
UpperCamelCase__ = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ = in_proj_bias[: config.hidden_size]
UpperCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ = in_proj_bias[-config.hidden_size :]
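# The qkv split above, reduced to shapes (toy hidden size; torch only): timm
# stores q, k, v as one fused (3*hidden, hidden) matrix, while the HF layout
# wants three separate (hidden, hidden) blocks.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)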
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = dct.pop(_UpperCamelCase )
UpperCamelCase__ = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = ViTMSNConfig()
UpperCamelCase__ = 10_00
UpperCamelCase__ = "datasets/huggingface/label-files"
UpperCamelCase__ = "imagenet-1k-id2label.json"
UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase ) , "r" ) )
UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
UpperCamelCase__ = 3_84
UpperCamelCase__ = 15_36
UpperCamelCase__ = 6
elif "l16" in checkpoint_url:
UpperCamelCase__ = 10_24
UpperCamelCase__ = 40_96
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = 0.1
elif "b4" in checkpoint_url:
UpperCamelCase__ = 4
elif "l7" in checkpoint_url:
UpperCamelCase__ = 7
UpperCamelCase__ = 10_24
UpperCamelCase__ = 40_96
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = 0.1
UpperCamelCase__ = ViTMSNModel(_UpperCamelCase )
UpperCamelCase__ = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="cpu" )["target_encoder"]
UpperCamelCase__ = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCamelCase )
UpperCamelCase__ = create_rename_keys(_UpperCamelCase , base_model=_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , base_model=_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
UpperCamelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=_UpperCamelCase , image_std=_UpperCamelCase )
UpperCamelCase__ = image_processor(images=_UpperCamelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
UpperCamelCase__ = model(**_UpperCamelCase )
UpperCamelCase__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
UpperCamelCase__ = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
UpperCamelCase__ = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _UpperCamelCase , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__lowercase: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__lowercase: Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
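# Example invocation (output path illustrative; the URL is this script's
# default, and the script name is assumed from the transformers repo):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small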
| 31
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
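# Usage sketch for the pipeline above (an unconditional latent-diffusion
# pipeline, `LDMPipeline` upstream; the checkpoint name is an assumption, not
# fixed by this file):
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]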
| 31
| 1
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = (UnCLIPScheduler,)
def lowercase_ ( self : str, **a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**a_ )
return config
def lowercase_ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def lowercase_ ( self : int ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=a_, prev_timestep=a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(variance_type="fixed_small_log" )
UpperCamelCase__ = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(variance_type="learned_range" )
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = 0.5
assert abs(scheduler._get_variance(1, predicted_variance=a_ ) - -10.1_712_790 ) < 1e-5
assert abs(scheduler._get_variance(487, predicted_variance=a_ ) - -5.7_998_052 ) < 1e-5
assert abs(scheduler._get_variance(999, predicted_variance=a_ ) - -0.0_010_011 ) < 1e-5
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(a_ ):
# 1. predict noise residual
UpperCamelCase__ = model(a_, a_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ = scheduler.step(a_, a_, a_, generator=a_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
scheduler.set_timesteps(25 )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(a_ ):
# 1. predict noise residual
UpperCamelCase__ = model(a_, a_ )
if i + 1 == timesteps.shape[0]:
UpperCamelCase__ = None
else:
UpperCamelCase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ = scheduler.step(
a_, a_, a_, prev_timestep=a_, generator=a_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowercase_ ( self : Tuple ):
"""simple docstring"""
pass
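# Core pattern exercised by the two denoising loops above, stripped of the
# assertions:
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample, generator=g).prev_sample
# i.e. each step predicts the noise residual, then the scheduler rolls the
# sample back one timestep.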
| 31
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
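# Shape of the payload parsed above (abridged; only the fields this script
# actually reads are shown: "runners", plus each runner's "name" and "status"):
#   {"total_count": 2,
#    "runners": [{"name": "gpu-runner-1", "status": "offline", ...},
#                {"name": "gpu-runner-2", "status": "online",  ...}]}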
| 31
| 1
|
'''simple docstring'''
__lowercase: Union[str, Any] = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
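# Worked example for the pressure helper above: 1 mol at 300 K in 0.02 m^3
# gives P = nRT/V = 1 * 300 * 8.314462 / 0.02 ≈ 124716.93 Pa (about 1.23 atm).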
| 31
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
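# Usage sketch (toy data; these functions back the public `interleave_datasets`
# and `concatenate_datasets` entry points):
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"x": [0, 1, 2]})
#   d2 = Dataset.from_dict({"x": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42,
#                               stopping_strategy="first_exhausted")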
| 31
| 1
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__lowercase: Optional[Any] = data_utils.TransfoXLTokenizer
__lowercase: Union[str, Any] = data_utils.TransfoXLCorpus
__lowercase: Any = data_utils
__lowercase: Optional[Any] = data_utils
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_UpperCamelCase , "rb" ) as fp:
UpperCamelCase__ = pickle.load(_UpperCamelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase__ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
UpperCamelCase__ = corpus.vocab.__dict__
torch.save(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , _UpperCamelCase )
UpperCamelCase__ = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(_UpperCamelCase , _UpperCamelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase__ = TransfoXLConfig()
else:
UpperCamelCase__ = TransfoXLConfig.from_json_file(_UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
UpperCamelCase__ = TransfoXLLMHeadModel(_UpperCamelCase )
UpperCamelCase__ = load_tf_weights_in_transfo_xl(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
UpperCamelCase__ = os.path.join(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F'Save PyTorch model to {os.path.abspath(_UpperCamelCase )}' )
torch.save(model.state_dict() , _UpperCamelCase )
print(F'Save configuration file to {os.path.abspath(_UpperCamelCase )}' )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase: Any = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__lowercase: Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
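# Example invocation (paths illustrative; script name taken from the
# transformers repo):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt
# Passing --transfo_xl_dataset_file instead converts a pre-processed corpus
# pickle into the vocabulary/dataset caches.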
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
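# Worked example for the greedy merge above: files = [2, 3, 4]
#   merge 2 + 3 -> cost 5, files become [4, 5]
#   merge 4 + 5 -> cost 9, files become [9]
#   total optimal merge cost = 5 + 9 = 14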
| 31
| 1
|
'''simple docstring'''
from torch import nn
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' )
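# Behaviour of the lookup above at a glance (upstream diffusers calls this
# helper `get_activation`; the dump renamed it):
#   "swish" / "silu" -> nn.SiLU(), "mish" -> nn.Mish(), "gelu" -> nn.GELU(),
#   anything else    -> ValueError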
| 31
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively build the state-space tree, printing every complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
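# For sequence = [3, 1, 2, 4] the first call above prints all 4! = 24 orderings in
# index order, beginning with:
#   [3, 1, 2, 4]
#   [3, 1, 4, 2]
#   [3, 2, 1, 4]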
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowercase: str = ["bert-base-uncased", "bert-base-cased"]
__lowercase: str = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class UpperCAmelCase ( tf.keras.Model):
def __init__( self : Dict, a_ : Optional[int] ):
"""simple docstring"""
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(a_ )
UpperCamelCase__ = TFAutoModel.from_config(a_ )
def lowercase_ ( self : Optional[int], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer(a_ )
UpperCamelCase__ = self.bert(**a_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(a_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(a_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(a_, use_fast_bert_tokenizer=a_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCamelCase__ = list(zip(self.test_sentences, self.test_sentences[::-1] ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(a_, return_tensors="tf", padding="longest" )
UpperCamelCase__ = tf_tokenizer(a_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64 ) == tf_outputs[key] ) )
@slow
def lowercase_ ( self : int ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64 ) == separated_outputs[key] ) )
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(a_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(a_ )
UpperCamelCase__ = compiled_tokenizer(a_ )
UpperCamelCase__ = tf_tokenizer(a_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=a_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(a_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(a_ ) / "saved.model"
model.save(a_ )
UpperCamelCase__ = tf.keras.models.load_model(a_ )
UpperCamelCase__ = loaded_model(a_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ), 1e-5 )
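# Illustrative end-to-end sketch (assumes `tensorflow_text` is installed; the checkpoint
# name is one of the test checkpoints above):
#
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   batch = tf_tokenizer(tf.constant(["A single test sentence."]))
#   # input_ids / attention_mask are produced fully in-graph, so the tokenizer can be
#   # compiled with tf.function or saved inside a Keras model, as the tests verify.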
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
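# Usage sketch (in the released library this configuration class is exposed as
# `YolosConfig`; shown with two of the defaults above made explicit):
#
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   config.hidden_size  # -> 768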
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Optional[Any] = IFInpaintingSuperResolutionPipeline
_lowerCamelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
_lowerCamelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'})
_lowerCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase_ ( self : Any, a_ : Optional[Any], a_ : int=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
UpperCamelCase__ = torch.manual_seed(a_ )
else:
UpperCamelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 16, 16), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA" )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self._test_save_load_local()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
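# Illustrative sketch of the public entry point (exposed in transformers as
# `load_flax_checkpoint_in_pytorch_model`; model class and path are placeholders):
#
#   pt_model = BertForSequenceClassification(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")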
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase: Tuple = logging.get_logger(__name__)
__lowercase: Union[str, Any] = {"vocab_file": "spiece.model"}
__lowercase: Tuple = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
__lowercase: str = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
__lowercase: List[str] = "▁"
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any], a_ : List[Any], a_ : str=True, a_ : Union[str, Any]=True, a_ : Tuple=False, a_ : Any="[CLS]", a_ : Optional[Any]="[SEP]", a_ : str="<unk>", a_ : List[str]="[SEP]", a_ : str="<pad>", a_ : List[Any]="[CLS]", a_ : Dict="[MASK]", a_ : Optional[Dict[str, Any]] = None, **a_ : Optional[Any], ):
"""simple docstring"""
UpperCamelCase__ = (
AddedToken(a_, lstrip=a_, rstrip=a_, normalized=a_ )
if isinstance(a_, a_ )
else mask_token
)
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_, remove_space=a_, keep_accents=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, )
UpperCamelCase__ = do_lower_case
UpperCamelCase__ = remove_space
UpperCamelCase__ = keep_accents
UpperCamelCase__ = vocab_file
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
return len(self.sp_model )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self : Optional[Any], a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : List[Any], a_ : Optional[Any] ):
"""simple docstring"""
if self.remove_space:
UpperCamelCase__ = " ".join(inputs.strip().split() )
else:
UpperCamelCase__ = inputs
UpperCamelCase__ = outputs.replace("``", "\"" ).replace("''", "\"" )
if not self.keep_accents:
UpperCamelCase__ = unicodedata.normalize("NFKD", a_ )
UpperCamelCase__ = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
UpperCamelCase__ = outputs.lower()
return outputs
def lowercase_ ( self : Any, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self.preprocess_text(a_ )
UpperCamelCase__ = self.sp_model.encode(a_, out_type=a_ )
UpperCamelCase__ = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
UpperCamelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_, "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCamelCase__ = cur_pieces[1:]
else:
UpperCamelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def lowercase_ ( self : Any, a_ : Optional[Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(a_ )
def lowercase_ ( self : Optional[Any], a_ : Optional[Any] ):
"""simple docstring"""
return self.sp_model.IdToPiece(a_ )
def lowercase_ ( self : Optional[int], a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = ""
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(a_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def lowercase_ ( self : Tuple, a_ : List[int], a_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self : str, a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
if token_ids_a is not None:
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1]
def lowercase_ ( self : int, a_ : List[int], a_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : Optional[Any], a_ : str, a_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_, "wb" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
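# Usage sketch (in the released library this class is exposed as `AlbertTokenizer`;
# the exact pieces depend on the SentencePiece model, so the split shown is indicative):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   tokenizer.tokenize("Hello world")  # -> roughly ['▁hello', '▁world'] with do_lower_case=True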
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of the given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn checksum validation on the given number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9
        # (e.g., 6 x 2 = 12), then add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
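# Worked example for "4111111111111111": the doubled digits contribute 8 (from the 4)
# plus 7 * 2 (from seven doubled 1s), and the untouched digits contribute 8 * 1,
# giving a checksum of 30; 30 % 10 == 0, so the Luhn check passes.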
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the given credit card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowercase: List[str] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
__lowercase: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = "https://pypi.org/pypi/diffusers/json"
UpperCamelCase__ = json.loads(request.urlopen(_UpperCamelCase ).read() )["releases"].keys()
return sorted(_UpperCamelCase , key=lambda _UpperCamelCase : version.Version(_UpperCamelCase ) )
def SCREAMING_SNAKE_CASE__( ) -> str:
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
UpperCamelCase__ = Path(_UpperCamelCase ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, os.PathLike] ) -> int:
'''simple docstring'''
init_hf_modules()
UpperCamelCase__ = Path(_UpperCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
UpperCamelCase__ = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase , "r" , encoding="utf-8" ) as f:
UpperCamelCase__ = f.read()
# Imports of the form `import .xxx`
UpperCamelCase__ = re.findall("^\s*import\s+\.(\S+)\s*$" , _UpperCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _UpperCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(_UpperCamelCase ) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ = False
UpperCamelCase__ = [module_file]
UpperCamelCase__ = []
# Let's recurse through all relative imports
while not no_change:
UpperCamelCase__ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_UpperCamelCase ) )
UpperCamelCase__ = Path(_UpperCamelCase ).parent
UpperCamelCase__ = [str(module_path / m ) for m in new_imports]
UpperCamelCase__ = [f for f in new_import_files if f not in all_relative_imports]
UpperCamelCase__ = [F'{f}.py' for f in new_import_files]
UpperCamelCase__ = len(_UpperCamelCase ) == 0
all_relative_imports.extend(_UpperCamelCase )
return all_relative_imports
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase , "r" , encoding="utf-8" ) as f:
UpperCamelCase__ = f.read()
# Imports of the form `import xxx`
UpperCamelCase__ = re.findall("^\s*import\s+(\S+)\s*$" , _UpperCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _UpperCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
UpperCamelCase__ = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
UpperCamelCase__ = list(set(_UpperCamelCase ) )
UpperCamelCase__ = []
for imp in imports:
try:
importlib.import_module(_UpperCamelCase )
except ImportError:
missing_packages.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
F'{", ".join(_UpperCamelCase )}. Run `pip install {" ".join(_UpperCamelCase )}`' )
return get_relative_imports(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = module_path.replace(os.path.sep , "." )
UpperCamelCase__ = importlib.import_module(_UpperCamelCase )
if class_name is None:
return find_pipeline_class(_UpperCamelCase )
return getattr(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> Dict:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
UpperCamelCase__ = dict(inspect.getmembers(_UpperCamelCase , inspect.isclass ) )
UpperCamelCase__ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _UpperCamelCase )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
UpperCamelCase__ = cls
return pipeline_class
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, os.PathLike] , _UpperCamelCase : str , _UpperCamelCase : Optional[Union[str, os.PathLike]] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[Dict[str, str]] = None , _UpperCamelCase : Optional[Union[bool, str]] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : bool = False , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
UpperCamelCase__ = module_file_or_url
UpperCamelCase__ = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
UpperCamelCase__ = get_diffusers_versions()
# cut ".dev0"
UpperCamelCase__ = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
UpperCamelCase__ = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
UpperCamelCase__ = F'v{revision}'
elif revision == "main":
UpperCamelCase__ = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
UpperCamelCase__ = COMMUNITY_PIPELINES_URL.format(revision=_UpperCamelCase , pipeline=_UpperCamelCase )
try:
UpperCamelCase__ = cached_download(
_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , proxies=_UpperCamelCase , resume_download=_UpperCamelCase , local_files_only=_UpperCamelCase , use_auth_token=_UpperCamelCase , )
UpperCamelCase__ = "git"
UpperCamelCase__ = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
UpperCamelCase__ = hf_hub_download(
_UpperCamelCase , _UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , proxies=_UpperCamelCase , resume_download=_UpperCamelCase , local_files_only=_UpperCamelCase , use_auth_token=_UpperCamelCase , )
UpperCamelCase__ = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
UpperCamelCase__ = check_imports(_UpperCamelCase )
# Now we move the module inside our cached dynamic modules.
UpperCamelCase__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_UpperCamelCase )
UpperCamelCase__ = Path(_UpperCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_UpperCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
UpperCamelCase__ = F'{module_needed}.py'
shutil.copy(os.path.join(_UpperCamelCase , _UpperCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCamelCase__ = use_auth_token
elif use_auth_token is True:
UpperCamelCase__ = HfFolder.get_token()
else:
UpperCamelCase__ = None
UpperCamelCase__ = model_info(_UpperCamelCase , revision=_UpperCamelCase , token=_UpperCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCamelCase__ = submodule_path / commit_hash
UpperCamelCase__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_UpperCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_UpperCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_UpperCamelCase , F'{module_needed}.py' , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , resume_download=_UpperCamelCase , proxies=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , local_files_only=_UpperCamelCase , )
return os.path.join(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, os.PathLike] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[Union[str, os.PathLike]] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[Dict[str, str]] = None , _UpperCamelCase : Optional[Union[bool, str]] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : bool = False , **_UpperCamelCase : Optional[int] , ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = get_cached_module_file(
_UpperCamelCase , _UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , resume_download=_UpperCamelCase , proxies=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , local_files_only=_UpperCamelCase , )
return get_class_in_module(_UpperCamelCase , final_module.replace(".py" , "" ) )
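# Illustrative sketch (in the released diffusers library this helper is exposed as
# `get_class_from_dynamic_module`; the community pipeline name below is an example):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "lpw_stable_diffusion", module_file="lpw_stable_diffusion.py"
#   )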
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Determine the missing carrier concentration via the mass-action law n * p = n_i**2."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
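# Worked example of the mass-action law n * p = n_i**2 (illustrative numbers): with
# hole_conc=1600 and intrinsic_conc=200, the missing electron concentration is
# 200**2 / 1600 = 25.0.
#
#   >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
#   ('electron_conc', 25.0)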
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares on [1, date, match] features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX with total matches as the exogenous regressor."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether today's data lies within a safe range of the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: List[Any] = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = 'camembert'
def __init__( self : Optional[Any], a_ : Any=3_0522, a_ : Union[str, Any]=768, a_ : str=12, a_ : int=12, a_ : str=3072, a_ : Tuple="gelu", a_ : str=0.1, a_ : List[Any]=0.1, a_ : Union[str, Any]=512, a_ : Optional[Any]=2, a_ : str=0.02, a_ : Union[str, Any]=1e-1_2, a_ : Any=1, a_ : Union[str, Any]=0, a_ : str=2, a_ : Tuple="absolute", a_ : Union[str, Any]=True, a_ : Union[str, Any]=None, **a_ : Optional[int], ):
"""simple docstring"""
super().__init__(pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = use_cache
UpperCamelCase__ = classifier_dropout
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
'''simple docstring'''
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary-search for the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
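# Worked example: in [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest strictly increasing
# subsequence is [2, 3, 7, 8, 10, 13], so the length is 6.
#
#   >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
#   6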
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase :
def __init__( self : Tuple, a_ : Optional[Any], a_ : Union[str, Any]=2, a_ : List[str]=32, a_ : List[str]=16, a_ : Any=3, a_ : Any=True, a_ : List[str]=True, a_ : Dict=32, a_ : int=4, a_ : Optional[int]=[0, 1, 2, 3], a_ : Dict=4, a_ : str=37, a_ : Tuple="gelu", a_ : int=0.1, a_ : Optional[int]=0.1, a_ : int=0.02, a_ : str=3, a_ : Any=[1, 384, 24, 24], a_ : Optional[Any]=True, a_ : Dict=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = backbone_out_indices
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = backbone_featmap_shape
UpperCamelCase__ = scope
UpperCamelCase__ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=a_, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=a_, backbone_featmap_shape=self.backbone_featmap_shape, )
def lowercase_ ( self : List[str], a_ : str, a_ : Optional[int], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = DPTModel(config=a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[Any], a_ : Tuple, a_ : Tuple, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size) )
def lowercase_ ( self : Optional[int], a_ : Any, a_ : Tuple, a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_, labels=a_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : List[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_lowerCamelCase : Tuple = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : List[str] = False
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = DPTModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def lowercase_ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def lowercase_ ( self : int ):
"""simple docstring"""
pass
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(a_ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], a_ )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
if model_class in get_values(a_ ):
continue
UpperCamelCase__ = model_class(a_ )
model.to(a_ )
model.train()
UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
UpperCamelCase__ = model(**a_ ).loss
loss.backward()
def lowercase_ ( self : Any ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = False
UpperCamelCase__ = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase__ = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
UpperCamelCase__ = model(**a_ ).loss
loss.backward()
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = _config_zero_init(a_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(config=a_ )
# Skip the check for the backbone
UpperCamelCase__ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase__ = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase_ ( self : Any ):
"""simple docstring"""
pass
@slow
def lowercase_ ( self : List[str] ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase__ = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = "add"
with self.assertRaises(a_ ):
UpperCamelCase__ = DPTForDepthEstimation(a_ )
def SCREAMING_SNAKE_CASE__( ) -> Any:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
UpperCamelCase__ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=a_, return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**a_ )
UpperCamelCase__ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase__ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape, a_ )
UpperCamelCase__ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, a_, atol=1e-4 ) )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
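# Hedged usage sketch, assuming the UpperCamelCase__ assignments above stand for
# the usual self.<name> = <name> config pattern; values are the signature defaults:
#
#   config = UpperCAmelCase()                                # gpt_neox_japanese
#   config.rotary_emb_base, config.max_position_embeddings   # -> (10000, 2048)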
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=a_ ) as mock_head:
UpperCamelCase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
        UpperCamelCase__ = GPT2TokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=a_ ) as mock_head:
            UpperCamelCase__ = GPT2TokenizerFast.from_pretrained("gpt2" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
try:
UpperCamelCase__ = tempfile.mktemp()
with open(a_, "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", a_ )
UpperCamelCase__ = AlbertTokenizer.from_pretrained(a_ )
finally:
os.remove(a_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json", "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", a_ )
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : Dict = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowercase_ ( cls : Any ):
"""simple docstring"""
UpperCamelCase__ = TOKEN
HfFolder.save_token(a_ )
@classmethod
def lowercase_ ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizer(a_ )
tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a_, repo_id="test-tokenizer", push_to_hub=a_, use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizer(a_ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a_, repo_id="valid_org/test-tokenizer-org", push_to_hub=a_, use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def lowercase_ ( self : Tuple ):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = CustomTokenizer(a_ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a_, "vocab.txt" )
with open(a_, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizerFast.from_pretrained(a_ )
bert_tokenizer.save_pretrained(a_ )
UpperCamelCase__ = CustomTokenizerFast.from_pretrained(a_ )
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer', use_fast=a_, trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
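# The round-trip contract these staging tests pin down, in brief (token and user
# values are placeholders):
#
#   tokenizer.push_to_hub("test-tokenizer", use_auth_token=token)         # upload
#   reloaded = BertTokenizer.from_pretrained(f"{user}/test-tokenizer")    # download
#   assert reloaded.vocab == tokenizer.vocab                              # same vocab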
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ), ["A", "BC"] )
self.assertEqual(trie.split("BCA" ), ["BC", "A"] )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ), ["AB", "C"] )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
UpperCamelCase__ = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] )
self.assertEqual(a_, ["AB", "C"] )
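# Behaviour pinned down by the splits above: Trie.split greedily matches the
# longest added token starting at each position and never cuts inside an
# unmatched token, e.g.
#
#   trie = Trie(); trie.add("AB"); trie.add("B"); trie.add("C")
#   trie.split("ABC")   # -> ["AB", "C"], not ["A", "B", "C"]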
'''simple docstring'''
def speed_of_sound_in_a_fluid( density : float , bulk_modulus : float ) -> float:
    '''simple docstring'''
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
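# Worked example for speed_of_sound_in_a_fluid above, v = (bulk_modulus / density) ** 0.5,
# with approximate textbook values for water (assumed here, not taken from this file):
#
#   >>> round(speed_of_sound_in_a_fluid(998, 2.15e9), 1)   # rho in kg/m^3, K in Pa
#   1467.8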
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCamelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
UpperCamelCase__ = os.path.join(self.tmpdirname, a_ )
with open(self.image_processor_file, "w", encoding="utf-8" ) as fp:
json.dump(a_, a_ )
def lowercase_ ( self : List[Any], **a_ : List[str] ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : int, **a_ : Dict ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], **a_ : List[Any] ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Dict ):
"""simple docstring"""
        UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
        UpperCamelCase__ = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=a_ )
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, a_ )
self.assertIsInstance(processor_fast.tokenizer, a_ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, a_ )
self.assertIsInstance(processor_fast.image_processor, a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)" )
UpperCamelCase__ = self.get_image_processor(do_normalize=a_, padding_value=1.0 )
UpperCamelCase__ = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=a_, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, a_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(a_, return_tensors="np" )
UpperCamelCase__ = processor(images=a_, return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = processor(text=a_ )
UpperCamelCase__ = tokenizer(a_, padding="max_length", max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(a_ )
UpperCamelCase__ = tokenizer.batch_decode(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = AlignProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
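# The contract these tests establish, in brief: one AlignProcessor call fans out
# to the wrapped tokenizer and image processor and merges their outputs, e.g.
#
#   batch = processor(text="lower newer", images=images)
#   sorted(batch.keys())   # -> ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']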
'''simple docstring'''
from __future__ import annotations
def p_series( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(F'1 / {pow(temp + 1 , int(power ) )}' if series else "1" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
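# Expected behaviour of p_series for a small input (the first term is always "1",
# then "1 / n^p" for each subsequent n):
#
#   >>> p_series(5, 2)
#   ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']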
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape( input_array : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes( features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes( features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis( features : np.ndarray , dimensions : int ) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), then keep only the first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis( features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int ) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
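# Hedged end-to-end sketch with the toy data from the tests above: PCA keeps the
# top-2 directions of a 3-feature, 5-sample dataset, so only the shape is shown:
#
#   features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
#   principal_component_analysis(features, 2).shape   # -> (2, 5)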
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
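# Hedged usage sketch (upstream datasets names this template AudioClassification
# and exposes align_with_features; both names are assumptions here):
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
#   task = task_template.align_with_features(features)   # label schema now carries 2 names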
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase: Any = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = ['pixel_values']
def __init__( self : Optional[Any], a_ : bool = True, a_ : Dict[str, int] = None, a_ : int = 0.9, a_ : PILImageResampling = PILImageResampling.BICUBIC, a_ : bool = True, a_ : Dict[str, int] = None, a_ : Union[int, float] = 1 / 255, a_ : bool = True, a_ : bool = True, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, **a_ : Any, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 224}
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCamelCase__ = get_size_dict(a_, param_name="crop_size" )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = crop_pct
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : Any, a_ : np.ndarray, a_ : Dict[str, int], a_ : Optional[float] = None, a_ : PILImageResampling = PILImageResampling.BICUBIC, a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Dict, ):
"""simple docstring"""
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
UpperCamelCase__ = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
UpperCamelCase__ = int(size["height"] / crop_pct )
else:
UpperCamelCase__ = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(a_ ) )
UpperCamelCase__ = get_resize_output_image_size(a_, size=a_, default_to_square=a_ )
else:
if "shortest_edge" in size:
UpperCamelCase__ = get_resize_output_image_size(a_, size=size["shortest_edge"], default_to_square=a_ )
elif "height" in size and "width" in size:
UpperCamelCase__ = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(a_ ) )
return resize(a_, size=a_, resample=a_, data_format=a_, **a_ )
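    # Worked example of the crop_pct branch above: with size {"shortest_edge": 224}
    # and crop_pct = 0.9, the image is first resized so its short side becomes
    # int(224 / 0.9) = 248, then center-cropped back to 224 x 224, i.e. the usual
    # "resize larger, then crop" evaluation transform.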
def lowercase_ ( self : Any, a_ : np.ndarray, a_ : Dict[str, int], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(a_, size=(size["height"], size["width"]), data_format=a_, **a_ )
def lowercase_ ( self : List[Any], a_ : np.ndarray, a_ : Union[int, float], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : int, ):
"""simple docstring"""
return rescale(a_, scale=a_, data_format=a_, **a_ )
def lowercase_ ( self : Dict, a_ : np.ndarray, a_ : Union[float, List[float]], a_ : Union[float, List[float]], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Tuple, ):
"""simple docstring"""
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def lowercase_ ( self : str, a_ : ImageInput, a_ : bool = None, a_ : Dict[str, int] = None, a_ : int = None, a_ : PILImageResampling = None, a_ : bool = None, a_ : Dict[str, int] = None, a_ : bool = None, a_ : float = None, a_ : bool = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[str, TensorType]] = None, a_ : ChannelDimension = ChannelDimension.FIRST, **a_ : str, ):
"""simple docstring"""
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(a_, param_name="crop_size" )
UpperCamelCase__ = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
        UpperCamelCase__ = [to_numpy_array(image ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a_, size=a_, crop_pct=a_, resample=a_ ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=a_, size=a_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a_, scale=a_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a_, a_ ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a_, tensor_type=a_ )
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
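        # Worked example with the defaults above:
        # (2000 - 400) // (7 - 1) = 266, so successive batch items grow by 266
        # samples each, from 400 up to just under 2000.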
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
            UpperCamelCase__ = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    _lowerCamelCase : Dict = Speech2TextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
        UpperCamelCase__ = Speech2TextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float64 )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
def __init__( self : Dict, a_ : list[list[int]] ):
"""simple docstring"""
UpperCamelCase__ = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(a_ ) != 0:
UpperCamelCase__ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(a_ ) != cols:
raise error
for value in row:
if not isinstance(a_, (int, float) ):
raise error
UpperCamelCase__ = rows
else:
UpperCamelCase__ = []
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return len(self.rows )
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return len(self.rows[0] )
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return self.order[0] == self.order[1]
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return bool(self.determinant() )
def lowercase_ ( self : Any, a_ : int, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(a_ ).determinant()
def lowercase_ ( self : int, a_ : int, a_ : int ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(a_, a_ )
return -1 * self.get_minor(a_, a_ )
def lowercase_ ( self : Dict ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(a_, a_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[str] ):
"""simple docstring"""
return str(self.rows )
def __str__( self : int ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(a_ ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
def lowercase_ ( self : int, a_ : list[int], a_ : int | None = None ):
"""simple docstring"""
UpperCamelCase__ = TypeError("Row must be a list containing all ints and/or floats" )
if not isinstance(a_, a_ ):
raise type_error
for value in row:
if not isinstance(a_, (int, float) ):
raise type_error
if len(a_ ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
self.rows.append(a_ )
else:
UpperCamelCase__ = self.rows[0:position] + [row] + self.rows[position:]
def lowercase_ ( self : str, a_ : list[int], a_ : int | None = None ):
"""simple docstring"""
UpperCamelCase__ = TypeError(
"Column must be a list containing all ints and/or floats" )
if not isinstance(a_, a_ ):
raise type_error
for value in column:
if not isinstance(a_, (int, float) ):
raise type_error
if len(a_ ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
UpperCamelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCamelCase__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : int, a_ : object ):
"""simple docstring"""
if not isinstance(a_, a_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : int, a_ : object ):
"""simple docstring"""
return not self == other
def __neg__( self : int ):
"""simple docstring"""
return self * -1
def __add__( self : Optional[Any], a_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any, a_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Tuple, a_ : Matrix | int | float ):
"""simple docstring"""
if isinstance(a_, (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(a_, a_ ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(a_, a_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__( self : int, a_ : int ):
"""simple docstring"""
if not isinstance(a_, a_ ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
            raise ValueError(
                "Only invertible matrices can be raised to a negative power" )
UpperCamelCase__ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase_ ( cls : str, a_ : list[int], a_ : list[int] ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(a_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
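
# A minimal standalone sketch of the adjugate-over-determinant identity that the
# class above uses for its inverse; the names (minor, determinant, inverse) are
# illustrative, not the class API, and the 2x2 check at the end is made up.
def minor(m: list[list[float]], row: int, col: int) -> list[list[float]]:
    return [[v for j, v in enumerate(r) if j != col]
            for i, r in enumerate(m) if i != row]

def determinant(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    # Cofactor expansion along the first row
    return sum((-1) ** c * m[0][c] * determinant(minor(m, 0, c))
               for c in range(len(m)))

def inverse(m: list[list[float]]) -> list[list[float]]:
    det = determinant(m)
    if det == 0:
        raise ValueError("Only matrices with a non-zero determinant have an inverse")
    n = len(m)
    # adjugate = transpose of the cofactor matrix
    adjugate = [[(-1) ** (i + j) * determinant(minor(m, j, i)) for j in range(n)]
                for i in range(n)]
    return [[adjugate[i][j] / det for j in range(n)] for i in range(n)]

print(inverse([[4.0, 7.0], [2.0, 6.0]]))  # [[0.6, -0.7], [-0.2, 0.4]]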
| 31
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
        # covariance_sum has already been initialized (it is only np.nan on the first pass)
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
        # covariance_sum has already been initialized (it is only np.nan on the first pass)
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
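
# A hedged, runnable condensation of the PCA path above: center the features,
# build the covariance matrix, keep the top eigenvectors from np.linalg.eigh.
# Same rows-are-features convention; the data values are made up.
import numpy as np

features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                     [2.0, 3.0, 4.0, 5.0, 6.0]])
centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
eigenvalues, eigenvectors = np.linalg.eigh(covariance)  # ascending eigenvalues
top = eigenvectors[:, ::-1][:, :1]                      # first principal axis
projected = top.T @ centered                            # 1 x n_samples
print(projected)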
| 31
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['sentencepiece']
def __init__( self : str, *a_ : Any, **a_ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = ['sentencepiece']
def __init__( self : int, *a_ : int, **a_ : Optional[int] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['sentencepiece']
def __init__( self : Union[str, Any], *a_ : Tuple, **a_ : int ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = ['sentencepiece']
def __init__( self : str, *a_ : int, **a_ : int ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[int] = ['sentencepiece']
def __init__( self : str, *a_ : Tuple, **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = ['sentencepiece']
def __init__( self : str, *a_ : Union[str, Any], **a_ : List[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[int] = ['sentencepiece']
def __init__( self : Optional[int], *a_ : Optional[int], **a_ : Tuple ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['sentencepiece']
def __init__( self : Tuple, *a_ : Union[str, Any], **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = ['sentencepiece']
def __init__( self : Any, *a_ : Union[str, Any], **a_ : Tuple ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['sentencepiece']
def __init__( self : Any, *a_ : List[Any], **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ['sentencepiece']
def __init__( self : Union[str, Any], *a_ : Optional[Any], **a_ : str ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ['sentencepiece']
def __init__( self : Dict, *a_ : List[str], **a_ : Tuple ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = ['sentencepiece']
def __init__( self : Any, *a_ : List[str], **a_ : Dict ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = ['sentencepiece']
def __init__( self : int, *a_ : List[str], **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['sentencepiece']
def __init__( self : Union[str, Any], *a_ : List[str], **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['sentencepiece']
def __init__( self : Union[str, Any], *a_ : List[str], **a_ : List[str] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['sentencepiece']
def __init__( self : Optional[int], *a_ : List[Any], **a_ : List[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['sentencepiece']
def __init__( self : Optional[int], *a_ : Optional[int], **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ['sentencepiece']
def __init__( self : Dict, *a_ : Any, **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Tuple = ['sentencepiece']
def __init__( self : str, *a_ : Tuple, **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = ['sentencepiece']
def __init__( self : List[Any], *a_ : List[Any], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ['sentencepiece']
def __init__( self : Dict, *a_ : str, **a_ : Dict ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = ['sentencepiece']
def __init__( self : Union[str, Any], *a_ : Tuple, **a_ : List[str] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = ['sentencepiece']
def __init__( self : Dict, *a_ : List[str], **a_ : Any ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = ['sentencepiece']
def __init__( self : Union[str, Any], *a_ : Optional[int], **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = ['sentencepiece']
def __init__( self : str, *a_ : int, **a_ : Optional[Any] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ['sentencepiece']
def __init__( self : str, *a_ : Tuple, **a_ : List[str] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['sentencepiece']
def __init__( self : str, *a_ : int, **a_ : List[str] ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = ['sentencepiece']
def __init__( self : List[Any], *a_ : List[Any], **a_ : str ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['sentencepiece']
def __init__( self : Optional[int], *a_ : Optional[Any], **a_ : Dict ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[int] = ['sentencepiece']
def __init__( self : Any, *a_ : Optional[int], **a_ : Any ):
"""simple docstring"""
requires_backends(self, ["sentencepiece"] )
| 31
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
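
# Hedged sketch of the dtype-downcasting idea in encode_np_array above: halve
# the itemsize within the same kind until a Pillow-safe dtype fits, then
# serialize through Pillow like image_to_bytes. The valid-dtype set here is
# abbreviated to the signed-integer entries; the int64 input array is made up.
from io import BytesIO

import numpy as np
import PIL.Image

array = np.arange(12, dtype=np.int64).reshape(3, 4)  # int64 is not Pillow-safe
valid = (np.dtype("<i4"), np.dtype(">i4"), np.dtype("<i2"), np.dtype(">i2"))
itemsize, dest = array.dtype.itemsize, None
while itemsize >= 1:
    candidate = np.dtype(f"{array.dtype.kind}{itemsize}")
    if candidate in valid:
        dest = candidate
        break
    itemsize //= 2
assert dest is not None  # int64 downcasts to int32 here

image = PIL.Image.fromarray(array.astype(dest))
fmt = "PNG" if image.mode in ("1", "L", "LA", "RGB", "RGBA") else "TIFF"
buffer = BytesIO()
image.save(buffer, format=fmt)
print(dest, image.mode, fmt, len(buffer.getvalue()))  # int32, mode "I", TIFF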
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : bool , _UpperCamelCase : list[int] , _UpperCamelCase : float ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(_UpperCamelCase ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
UpperCamelCase__ = math.log(len(_UpperCamelCase ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
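
# Standalone sketch of the recursion above: a perfect binary game tree with the
# scores at the leaves and alternating max/min turns. Classic test case; the
# known answer is 12.
import math

def minimax(depth: int, node: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth == height:  # reached a leaf
        return scores[node]
    left = minimax(depth + 1, node * 2, not is_max, scores, height)
    right = minimax(depth + 1, node * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [3, 5, 2, 9, 12, 5, 23, 23]
height = math.log2(len(scores))
print(minimax(0, 0, True, scores, height))  # 12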
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
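
# The setUp fixture above boils down to: write a toy BPE vocab and merges file
# to disk, then load them back through CLIPTokenizer. A hedged standalone
# version of that round trip (toy vocab, temporary paths; assumes the
# `transformers` package is installed).
import json
import os
import tempfile

from transformers import CLIPTokenizer

vocab = ["l", "o", "w", "e", "r", "lo", "low</w>", "er</w>",
         "<unk>", "<|startoftext|>", "<|endoftext|>"]
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump(dict(zip(vocab, range(len(vocab)))), fp)
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
    tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
    print(tokenizer.tokenize("lower"))  # ['lo', 'w', 'er</w>']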
| 31
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase: List[str] = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = ['pixel_values']
def __init__( self : int, a_ : bool = True, a_ : Optional[Dict[str, int]] = None, a_ : PILImageResampling = PILImageResampling.BILINEAR, a_ : bool = True, a_ : Dict[str, int] = None, a_ : bool = True, a_ : Union[int, float] = 1 / 255, a_ : bool = True, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 256}
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCamelCase__ = get_size_dict(a_ )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self : str, a_ : np.ndarray, a_ : Dict[str, int], a_ : PILImageResampling = PILImageResampling.BICUBIC, a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : int, ):
"""simple docstring"""
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCamelCase__ = get_resize_output_image_size(a_, size=size["shortest_edge"], default_to_square=a_ )
return resize(a_, size=a_, resample=a_, data_format=a_, **a_ )
def lowercase_ ( self : List[Any], a_ : np.ndarray, a_ : Dict[str, int], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = get_size_dict(a_ )
return center_crop(a_, size=(size["height"], size["width"]), data_format=a_, **a_ )
def lowercase_ ( self : List[Any], a_ : np.ndarray, a_ : float, a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : List[Any] ):
"""simple docstring"""
return rescale(a_, scale=a_, data_format=a_, **a_ )
def lowercase_ ( self : Optional[Any], a_ : np.ndarray, a_ : Union[float, List[float]], a_ : Union[float, List[float]], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Any, ):
"""simple docstring"""
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def lowercase_ ( self : List[Any], a_ : ImageInput, a_ : Optional[bool] = None, a_ : Dict[str, int] = None, a_ : PILImageResampling = None, a_ : bool = None, a_ : Dict[str, int] = None, a_ : Optional[bool] = None, a_ : Optional[float] = None, a_ : Optional[bool] = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[str, TensorType]] = None, a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST, **a_ : str, ):
"""simple docstring"""
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(a_ )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a_, size=a_, resample=a_ ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=a_, size=a_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a_, scale=a_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a_, a_ ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a_, tensor_type=a_ )
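
# The preprocess method above is a straight pipeline: resize, center-crop,
# rescale, normalize, channels-first. A numpy-only sketch of the last three
# steps on a fake image; the 0.5 mean/std match the ImageNet-standard defaults.
import numpy as np

image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.5, 0.5, 0.5])

rescaled = image.astype(np.float32) * (1 / 255)   # do_rescale
normalized = (rescaled - mean) / std              # do_normalize
pixel_values = normalized.transpose(2, 0, 1)      # HWC -> CHW (ChannelDimension.FIRST)
print(pixel_values.shape)                         # (3, 224, 224)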
| 31
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
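
# random_subsample, defined near the top of this script, is its only custom
# audio transform: crop a waveform to at most max_length seconds at a random
# offset. A standalone restatement with a quick shape check on silent audio.
from random import randint

import numpy as np

def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav  # clip already short enough
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]

wav = np.zeros(16000 * 30)                # 30 s of audio
print(random_subsample(wav, 20.0).shape)  # (320000,) i.e. 20 s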
| 31
| 1
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
def is_in_circle(_UpperCamelCase : float , _UpperCamelCase : float ) -> bool:
UpperCamelCase__ = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCamelCase__ = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
UpperCamelCase__ = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The math.pi value is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float = 0.0 , _UpperCamelCase : float = 1.0 , ) -> float:
'''simple docstring'''
return mean(
function_to_integrate(uniform(_UpperCamelCase , _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : float = 0.0 , _UpperCamelCase : float = 1.0 ) -> None:
'''simple docstring'''
def identity_function(_UpperCamelCase : float ) -> float:
return x
UpperCamelCase__ = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("******************" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> None:
'''simple docstring'''
def function_to_integrate(_UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
UpperCamelCase__ = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
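
# The same pi estimate as the function above, vectorized with numpy instead of
# a Python-level mean over uniform() draws. An aside, not part of the module.
import numpy as np

rng = np.random.default_rng(0)
points = rng.uniform(-1.0, 1.0, size=(1_000_000, 2))
inside = (points ** 2).sum(axis=1) <= 1.0  # landed inside the unit circle
print(4 * inside.mean())                   # ~3.141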
| 31
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
        if math.log2(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
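
# read_file_binary and write_file_binary above convert between raw bytes and a
# "0"/"1" string eight bits at a time. The core round trip, as a tiny sketch:
data = b"hi"
bits = "".join(f"{byte:08b}" for byte in data)
print(bits)  # 0110100001101001
restored = bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))
print(restored)  # b'hi'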
| 31
| 1
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
@register_to_config
def __init__( self : List[str], a_ : int, a_ : int, a_ : int, a_ : float, a_ : int, a_ : int, a_ : int, a_ : int, a_ : str, a_ : bool = False, ):
"""simple docstring"""
super().__init__()
UpperCamelCase__ = nn.Embedding(a_, a_ )
UpperCamelCase__ = nn.Embedding(a_, a_ )
UpperCamelCase__ = False
UpperCamelCase__ = nn.Dropout(p=a_ )
        UpperCamelCase__ = T5Config(
vocab_size=a_, d_model=a_, num_heads=a_, d_kv=a_, d_ff=a_, dropout_rate=a_, feed_forward_proj=a_, is_decoder=a_, is_encoder_decoder=a_, )
UpperCamelCase__ = nn.ModuleList()
for lyr_num in range(a_ ):
            UpperCamelCase__ = T5Block(a_ )
self.encoders.append(a_ )
        UpperCamelCase__ = T5LayerNorm(a_ )
UpperCamelCase__ = nn.Dropout(p=a_ )
def lowercase_ ( self : str, a_ : Any, a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.token_embedder(a_ )
UpperCamelCase__ = encoder_input_tokens.shape[1]
UpperCamelCase__ = torch.arange(a_, device=encoder_input_tokens.device )
x += self.position_encoding(a_ )
UpperCamelCase__ = self.dropout_pre(a_ )
# inverted the attention mask
UpperCamelCase__ = encoder_input_tokens.size()
UpperCamelCase__ = self.get_extended_attention_mask(a_, a_ )
for lyr in self.encoders:
UpperCamelCase__ = lyr(a_, a_ )[0]
UpperCamelCase__ = self.layer_norm(a_ )
return self.dropout_post(a_ ), encoder_inputs_mask
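# For reference, get_extended_attention_mask (inherited from ModuleUtilsMixin and
# called in the forward pass above) broadcasts a (batch, seq) padding mask to
# (batch, 1, 1, seq) and maps keep -> 0, pad -> a large negative value, so it can
# be added to attention logits. A minimal sketch of the same idea (names are ours):
def _extended_attention_mask_sketch(mask: torch.Tensor) -> torch.Tensor:
    # mask: (batch, seq) with 1.0 for real tokens and 0.0 for padding
    extended = mask[:, None, None, :]
    return (1.0 - extended) * -1e9  # additive bias for the attention scores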
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
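# Hypothetical usage of the pipeline above (this dump renamed the class; in
# diffusers the same VQModel + UNet2DModel + DDIMScheduler layout is LDMPipeline,
# and the checkpoint id below is only one example of that combination):
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]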
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Tuple=0.9_9_9 , _UpperCamelCase : Dict="cosine" , ) -> Union[str, Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase : str ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase : Optional[int] ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
UpperCamelCase__ = []
for i in range(_UpperCamelCase ):
UpperCamelCase__ = i / num_diffusion_timesteps
UpperCamelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ) , _UpperCamelCase ) )
return torch.tensor(_UpperCamelCase , dtype=torch.floataa )
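# Sanity note for the schedule helper above (renamed by this dump; upstream it is
# betas_for_alpha_bar): each beta is
#     beta_t = min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)
# for consecutive normalized timesteps t1 < t2, so the cumulative product of
# (1 - beta_t) follows the chosen alpha_bar curve up to the max_beta clip.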
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase : Dict = 2
@register_to_config
def __init__( self : int, a_ : int = 1000, a_ : float = 0.00_085, a_ : float = 0.012, a_ : str = "linear", a_ : Optional[Union[np.ndarray, List[float]]] = None, a_ : str = "epsilon", a_ : Optional[bool] = False, a_ : Optional[bool] = False, a_ : float = 1.0, a_ : str = "linspace", a_ : int = 0, ):
"""simple docstring"""
if trained_betas is not None:
UpperCamelCase__ = torch.tensor(a_, dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCamelCase__ = torch.linspace(a_, a_, a_, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase__ = (
torch.linspace(beta_start**0.5, beta_end**0.5, a_, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase__ = betas_for_alpha_bar(a_, alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCamelCase__ = betas_for_alpha_bar(a_, alpha_transform_type="exp" )
else:
raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
UpperCamelCase__ = 1.0 - self.betas
UpperCamelCase__ = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(a_, a_, a_ )
UpperCamelCase__ = use_karras_sigmas
def lowercase_ ( self : List[Any], a_ : List[str], a_ : str=None ):
"""simple docstring"""
if schedule_timesteps is None:
UpperCamelCase__ = self.timesteps
UpperCamelCase__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase__ = 1 if len(a_ ) > 1 else 0
else:
UpperCamelCase__ = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
UpperCamelCase__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase_ ( self : int, a_ : torch.FloatTensor, a_ : Union[float, torch.FloatTensor], ):
"""simple docstring"""
UpperCamelCase__ = self.index_for_timestep(a_ )
UpperCamelCase__ = self.sigmas[step_index]
UpperCamelCase__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase_ ( self : Optional[int], a_ : int, a_ : Union[str, torch.device] = None, a_ : Optional[int] = None, ):
"""simple docstring"""
UpperCamelCase__ = num_inference_steps
UpperCamelCase__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase__ = np.linspace(0, num_train_timesteps - 1, a_, dtype=a_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ = (np.arange(0, a_ ) * step_ratio).round()[::-1].copy().astype(a_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ = (np.arange(a_, 0, -step_ratio )).round().copy().astype(a_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
UpperCamelCase__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase__ = np.log(a_ )
UpperCamelCase__ = np.interp(a_, np.arange(0, len(a_ ) ), a_ )
if self.config.use_karras_sigmas:
UpperCamelCase__ = self._convert_to_karras(in_sigmas=a_, num_inference_steps=self.num_inference_steps )
UpperCamelCase__ = np.array([self._sigma_to_t(a_, a_ ) for sigma in sigmas] )
UpperCamelCase__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCamelCase__ = torch.from_numpy(a_ ).to(device=a_ )
UpperCamelCase__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase__ = torch.from_numpy(a_ )
UpperCamelCase__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a_ ).startswith("mps" ):
# mps does not support float64
UpperCamelCase__ = timesteps.to(a_, dtype=torch.floataa )
else:
UpperCamelCase__ = timesteps.to(device=a_ )
# empty dt and derivative
UpperCamelCase__ = None
UpperCamelCase__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase__ = defaultdict(a_ )
def lowercase_ ( self : Dict, a_ : Union[str, Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = np.log(a_ )
# get distribution
UpperCamelCase__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCamelCase__ = np.cumsum((dists >= 0), axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCamelCase__ = low_idx + 1
UpperCamelCase__ = log_sigmas[low_idx]
UpperCamelCase__ = log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase__ = (low - log_sigma) / (low - high)
UpperCamelCase__ = np.clip(a_, 0, 1 )
# transform interpolation to time range
UpperCamelCase__ = (1 - w) * low_idx + w * high_idx
UpperCamelCase__ = t.reshape(sigma.shape )
return t
def lowercase_ ( self : Optional[int], a_ : torch.FloatTensor, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = in_sigmas[-1].item()
UpperCamelCase__ = in_sigmas[0].item()
UpperCamelCase__ = 7.0 # 7.0 is the value used in the paper
UpperCamelCase__ = np.linspace(0, 1, a_ )
UpperCamelCase__ = sigma_min ** (1 / rho)
UpperCamelCase__ = sigma_max ** (1 / rho)
UpperCamelCase__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
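    # The schedule above follows Karras et al. (2022):
    #     sigma_i = (sigma_max^(1/rho) + i/(n-1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
    # with rho = 7.0, which spaces the sigmas densely near sigma_min, where most of
    # the fine detail is resolved.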
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
return self.dt is None
def lowercase_ ( self : Tuple, a_ : Union[torch.FloatTensor, np.ndarray], a_ : Union[float, torch.FloatTensor], a_ : Union[torch.FloatTensor, np.ndarray], a_ : bool = True, ):
"""simple docstring"""
UpperCamelCase__ = self.index_for_timestep(a_ )
# advance index counter by 1
UpperCamelCase__ = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase__ = self.sigmas[step_index]
UpperCamelCase__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCamelCase__ = self.sigmas[step_index - 1]
UpperCamelCase__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase__ = 0
UpperCamelCase__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase__ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase__ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCamelCase__ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
UpperCamelCase__ = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase__ = sigma_next - sigma_hat
# store for 2nd order step
UpperCamelCase__ = derivative
UpperCamelCase__ = dt
UpperCamelCase__ = sample
else:
# 2. 2nd order / Heun's method
UpperCamelCase__ = (sample - pred_original_sample) / sigma_next
UpperCamelCase__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCamelCase__ = self.dt
UpperCamelCase__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a_ )
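    # The two branches above implement Heun's method: the first-order pass stores
    # an Euler derivative at sigma_hat, the second pass averages it with the
    # derivative at sigma_next, i.e. x_next = x + dt * (d1 + d2) / 2, and then
    # clears dt/derivative to return the scheduler to first-order mode.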
def lowercase_ ( self : Optional[int], a_ : torch.FloatTensor, a_ : torch.FloatTensor, a_ : torch.FloatTensor, ):
"""simple docstring"""
UpperCamelCase__ = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a_ ):
# mps does not support float64
UpperCamelCase__ = self.timesteps.to(original_samples.device, dtype=torch.floataa )
UpperCamelCase__ = timesteps.to(original_samples.device, dtype=torch.floataa )
else:
UpperCamelCase__ = self.timesteps.to(original_samples.device )
UpperCamelCase__ = timesteps.to(original_samples.device )
UpperCamelCase__ = [self.index_for_timestep(a_, a_ ) for t in timesteps]
UpperCamelCase__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase__ = sigma.unsqueeze(-1 )
UpperCamelCase__ = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[Any] ):
"""simple docstring"""
return self.config.num_train_timesteps
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    '''simple docstring'''
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
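# Example invocation (the module's file name is an assumption; substitute the real path):
#
#     python get_runner_status.py --target_runners runner-1,runner-2 --token "$GITHUB_TOKEN"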
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FPaa = "fp16"  # FP16 upstream; this dump renamed the constant
BFaa = "bf16"  # BF16 upstream
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : int ):
"""simple docstring"""
super().setUp()
UpperCamelCase__ = dict(
ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(a_ ):
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = f'{i + 1}'
UpperCamelCase__ = strategy
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1 ) )
def lowercase_ ( self : str ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(a_ ):
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = prefetch_policy
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1 ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(a_ ):
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = state_dict_type
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = AutoModel.from_pretrained(a_ )
for policy in FSDP_AUTO_WRAP_POLICY:
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
UpperCamelCase__ = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
UpperCamelCase__ = "2000"
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(a_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = "TRANSFORMER_BASED_WRAP"
UpperCamelCase__ = "T5Layer"
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
with self.assertRaises(a_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(a_ )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = "SIZE_BASED_WRAP"
UpperCamelCase__ = "0"
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(a_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = mp_dtype
with mockenv_context(**a_ ):
UpperCamelCase__ = Accelerator()
if mp_dtype == "fp16":
UpperCamelCase__ = torch.floataa
elif mp_dtype == "bf16":
UpperCamelCase__ = torch.bfloataa
UpperCamelCase__ = MixedPrecision(param_dtype=a_, reduce_dtype=a_, buffer_dtype=a_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, a_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler, a_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
UpperCamelCase__ = self.dist_env.copy()
UpperCamelCase__ = str(a_ ).lower()
with mockenv_context(**a_ ):
UpperCamelCase__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=a_ ) )
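    # Every test in this class configures FullyShardedDataParallelPlugin purely
    # through environment variables (FSDP_SHARDING_STRATEGY, FSDP_BACKWARD_PREFETCH,
    # FSDP_STATE_DICT_TYPE, ...), mirroring how `accelerate launch` forwards its
    # CLI flags to worker processes.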
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : int ):
"""simple docstring"""
super().setUp()
UpperCamelCase__ = 0.82
UpperCamelCase__ = [
"fsdp_shard_grad_op_transformer_based_wrap",
"fsdp_full_shard_transformer_based_wrap",
]
UpperCamelCase__ = {
"multi_gpu_fp16": 3200,
"fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
"fsdp_full_shard_transformer_based_wrap_fp16": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
UpperCamelCase__ = 160
UpperCamelCase__ = 160
UpperCamelCase__ = inspect.getfile(accelerate.test_utils )
UpperCamelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = os.path.join(self.test_scripts_folder, "test_performance.py" )
UpperCamelCase__ = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
UpperCamelCase__ = cmd.copy()
for i, strategy in enumerate(a_ ):
if strategy.lower() in config:
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
f'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a_, env=os.environ.copy() )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = os.path.join(self.test_scripts_folder, "test_checkpointing.py" )
UpperCamelCase__ = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
for i, strategy in enumerate(a_ ):
UpperCamelCase__ = cmd.copy()
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
UpperCamelCase__ = len(a_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
UpperCamelCase__ = cmd_config[:state_dict_config_index]
cmd_config.append(f'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a_, env=os.environ.copy() )
UpperCamelCase__ = cmd_config[:-1]
UpperCamelCase__ = os.path.join(self.tmpdir, "epoch_0" )
cmd_config.extend(
[
f'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a_, env=os.environ.copy() )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py" )
UpperCamelCase__ = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
UpperCamelCase__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
for i, strategy in enumerate(a_ ):
if strategy.lower() in spec:
cmd_config.append(f'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f'--output_dir={self.tmpdir}',
f'--peak_memory_upper_bound={peak_mem_upper_bound}',
f'--n_train={self.n_train}',
f'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(a_, env=os.environ.copy() )
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
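# In the published `datasets` API these two wrappers are exposed as
# interleave_datasets and concatenate_datasets (both defs above were renamed by
# this dump). A minimal usage sketch against that public API:
#
#     from datasets import Dataset, interleave_datasets
#     d1 = Dataset.from_dict({"x": [0, 1, 2]})
#     d2 = Dataset.from_dict({"x": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)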
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    '''simple docstring'''
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    '''simple docstring'''
    # Load the Iris dataset and split it into train and test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    '''simple docstring'''
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
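# The linear scan with list.index/list.pop above is O(n^2). The same greedy choice
# (always merge the two smallest files, as in Huffman coding) runs in O(n log n)
# with a heap; a minimal sketch under that assumption:
import heapq


def optimal_merge_pattern_heap(files: list) -> float:
    heapq.heapify(files)
    optimal_merge_cost = 0
    while len(files) > 1:
        # merge the two cheapest files and pay their combined size
        merged = heapq.heappop(files) + heapq.heappop(files)
        optimal_merge_cost += merged
        heapq.heappush(files, merged)
    return optimal_merge_cost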
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
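# Summary of the renaming rules above: Flax stores conv kernels as (H, W, in, out)
# where PyTorch uses (out, in, H, W), hence the (2, 3, 1, 0) transpose; linear
# weights are kept transposed ((in, out) vs (out, in)), hence the plain .T; and
# layer-norm weight/gamma maps to "scale", embedding weight to "embedding".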
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
# handle bf16 and bf16 is not fully supported in PyTorch yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
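# For comparison, the standard library yields the same sequences lazily:
#
#     from itertools import permutations
#     for p in permutations([3, 1, 2, 4]):
#         print(list(p))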
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart)
    # first pick implicants that are the only cover for some minterm (essential)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily pick the implicant that covers the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
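    # For the ONNX export config above: the inputs mapping declares the dynamic
    # axes of pixel_values, 1e-4 is the absolute tolerance used when validating the
    # exported graph against the PyTorch model, and 12 is the default opset version.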
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 't5'
_lowerCamelCase : Optional[int] = ['past_key_values']
_lowerCamelCase : List[Any] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : str, a_ : str=3_2128, a_ : List[Any]=512, a_ : str=64, a_ : Union[str, Any]=2048, a_ : Optional[int]=6, a_ : Optional[Any]=None, a_ : Tuple=8, a_ : Tuple=32, a_ : Dict=128, a_ : Tuple=0.1, a_ : Any=1e-6, a_ : List[str]=1.0, a_ : str="relu", a_ : str=True, a_ : Union[str, Any]=True, a_ : List[str]=0, a_ : Union[str, Any]=1, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = d_model
UpperCamelCase__ = d_kv
UpperCamelCase__ = d_ff
UpperCamelCase__ = num_layers
UpperCamelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase__ = num_heads
UpperCamelCase__ = relative_attention_num_buckets
UpperCamelCase__ = relative_attention_max_distance
UpperCamelCase__ = dropout_rate
UpperCamelCase__ = layer_norm_epsilon
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = feed_forward_proj
UpperCamelCase__ = use_cache
UpperCamelCase__ = self.feed_forward_proj.split("-" )
UpperCamelCase__ = act_info[-1]
UpperCamelCase__ = act_info[0] == "gated"
if len(a_ ) > 1 and act_info[0] != "gated" or len(a_ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCamelCase__ = "gelu_new"
super().__init__(
pad_token_id=a_, eos_token_id=a_, is_encoder_decoder=a_, **a_, )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs" )
return common_inputs
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 13
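# A standalone sketch of the `feed_forward_proj` parsing done in T5Config.__init__ above:
# the string splits on "-" into an optional "gated" prefix and an activation name, and
# "gated-gelu" is remapped to "gelu_new" for backwards compatibility. The helper name is
# illustrative, not part of the transformers API.
def parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
        raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act
assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)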
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key : Tuple[str] , pt_tensor : np.ndarray , random_flax_state_dict : Dict[str, jnp.ndarray] , model_prefix : str , ) -> (Tuple[str], np.ndarray):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
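# A minimal self-contained sketch of the two most common renaming rules above, assuming
# NCHW PyTorch conv weights and HWIO Flax kernels (the helper name is illustrative;
# `np` is already imported at the top of this module):
def pt_weight_to_flax_kernel(pt_tuple_key, pt_tensor):
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        # conv: PyTorch (out, in, H, W) -> Flax (H, W, in, out)
        return pt_tuple_key[:-1] + ("kernel",), pt_tensor.transpose(2, 3, 1, 0)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 2:
        # linear: PyTorch (out, in) -> Flax (in, out)
        return pt_tuple_key[:-1] + ("kernel",), pt_tensor.T
    return pt_tuple_key, pt_tensor
_key, _kernel = pt_weight_to_flax_kernel(("dense", "weight"), np.zeros((8, 4)))
assert _key == ("dense", "kernel") and _kernel.shape == (4, 8)
_key, _kernel = pt_weight_to_flax_kernel(("conv", "weight"), np.zeros((16, 3, 3, 3)))
assert _kernel.shape == (3, 3, 3, 16)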
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _UpperCamelCase ) ).values()
    if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
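# Typical entry points for the converters above: `from_pretrained` dispatches to them
# when loading across frameworks. A usage sketch (assumes torch, flax and network
# access; "bert-base-uncased" is just an example checkpoint):
#
#     from transformers import BertModel, FlaxBertModel
#     flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)
#     pt_model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)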
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowercase: List[str] = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: str = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: str = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
__lowercase: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
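# The `_LazyModule` above defers submodule imports until an attribute is first accessed.
# A minimal standalone sketch of the same idea using PEP 562 module-level `__getattr__`
# (simplified; not the transformers implementation):
#
#     import importlib
#     _attr_to_module = {"SpeechEncoderDecoderModel": ".modeling_speech_encoder_decoder"}
#     def __getattr__(name):
#         if name not in _attr_to_module:
#             raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
#         module = importlib.import_module(_attr_to_module[name], __package__)
#         return getattr(module, name)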
'''simple docstring'''
def validate_initial_digits( credit_card_number : str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation( credit_card_number : str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number( credit_card_number : str ) -> bool:
    '''simple docstring'''
    error_message = F'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(F'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'{error_message} it fails the Luhn check.' )
        return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: Any = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] , _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
UpperCamelCase__ = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )
UpperCamelCase__ = in_proj_weight[
: encoder_config.hidden_size, :
]
UpperCamelCase__ = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
UpperCamelCase__ = in_proj_weight[
-encoder_config.hidden_size :, :
]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = dct.pop(_UpperCamelCase )
UpperCamelCase__ = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
if "handwritten" in checkpoint_url:
UpperCamelCase__ = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCamelCase__ = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
UpperCamelCase__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = ViTConfig(image_size=3_84 , qkv_bias=_UpperCamelCase )
UpperCamelCase__ = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
UpperCamelCase__ = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
UpperCamelCase__ = 10_24
UpperCamelCase__ = 40_96
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = 10_24
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCamelCase__ = False
UpperCamelCase__ = "relu"
UpperCamelCase__ = 10_24
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
# load HuggingFace model
UpperCamelCase__ = ViTModel(_UpperCamelCase , add_pooling_layer=_UpperCamelCase )
UpperCamelCase__ = TrOCRForCausalLM(_UpperCamelCase )
UpperCamelCase__ = VisionEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
UpperCamelCase__ = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="cpu" , check_hash=_UpperCamelCase )["model"]
UpperCamelCase__ = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
UpperCamelCase__ = state_dict.pop(_UpperCamelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
UpperCamelCase__ = val
else:
UpperCamelCase__ = val
# load state dict
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image
UpperCamelCase__ = ViTImageProcessor(size=encoder_config.image_size )
UpperCamelCase__ = RobertaTokenizer.from_pretrained("roberta-large" )
UpperCamelCase__ = TrOCRProcessor(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = processor(images=prepare_img(_UpperCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
UpperCamelCase__ = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
UpperCamelCase__ = model(pixel_values=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
UpperCamelCase__ = outputs.logits
UpperCamelCase__ = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
UpperCamelCase__ = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
UpperCamelCase__ = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
UpperCamelCase__ = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
UpperCamelCase__ = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCamelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__lowercase: Dict = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowercase: Union[str, Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
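# A standalone sketch of the fused-QKV split that read_in_q_k_v performs above,
# assuming the fused projection stacks query, key and value along dim 0:
#
#     import torch
#     hidden_size = 4
#     in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#     q = in_proj_weight[:hidden_size, :]
#     k = in_proj_weight[hidden_size : hidden_size * 2, :]
#     v = in_proj_weight[-hidden_size:, :]
#     assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)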
'''simple docstring'''
from __future__ import annotations
def carrier_concentration( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
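# Worked usage of the mass-action relation n * p = n_i**2 solved above. The name
# carrier_concentration is this function's conventional name, restored in the
# definition; 0 marks the unknown quantity:
assert carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=25) == ("electron_conc", 6.25)
assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)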
'''simple docstring'''
from __future__ import annotations
__lowercase: int = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid : Matrix , row : int , column : int , n : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location( grid : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku( grid : Matrix ) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid : Matrix ) -> None:
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(cell , end=" " )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import random
def random_graph( vertices_number : int , probability : float , directed : bool = False ) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number : int ) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
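# In the G(n, p) model above each of the n * (n - 1) / 2 undirected pairs is kept
# independently with probability p, so the expected edge count is n * (n - 1) * p / 2.
# A quick empirical check (sketch):
#
#     random.seed(0)
#     g = random_graph(100, 0.25)
#     edges = sum(len(adj) for adj in g.values()) // 2  # undirected edges are stored twice
#     print(edges, "expected ~", 100 * 99 * 0.25 / 2)   # expectation = 1237.5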
'''simple docstring'''
from __future__ import annotations
def ceil_index( v , l , r , key ):  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length( v : list[int] ) -> int:
    '''simple docstring'''
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
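# Example: one longest strictly increasing subsequence of the list below is
# [2, 3, 7, 8, 10, 13], so the computed length is 6.
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
assert longest_increasing_subsequence_length([]) == 0
assert longest_increasing_subsequence_length([5, 4, 3, 2, 1]) == 1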
'''simple docstring'''
__lowercase: Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowercase: int = [{"type": "code", "content": INSTALL_CONTENT}]
__lowercase: int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', "stage2.cls_token") )
return token
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = num_labels
UpperCamelCase__ = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) ) , "r" ) )
    UpperCamelCase__ = {int(k ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
UpperCamelCase__ = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
UpperCamelCase__ = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCamelCase__ = [2, 2, 20]
UpperCamelCase__ = [3, 12, 16]
UpperCamelCase__ = [1_92, 7_68, 10_24]
UpperCamelCase__ = CvtForImageClassification(_UpperCAmelCase )
UpperCamelCase__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
UpperCamelCase__ = image_size
UpperCamelCase__ = torch.load(_UpperCAmelCase , map_location=torch.device("cpu" ) )
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCamelCase__ = list_of_state_dict + cls_token(_UpperCAmelCase )
UpperCamelCase__ = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
UpperCamelCase__ = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase__ = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
UpperCamelCase__ = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowercase: Dict = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you\'d like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__lowercase: Tuple = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
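# All of the conversion scripts in this collection follow the same two-step pattern:
# build (target_key, source_key) pairs, then rewrite the checkpoint. A generic sketch
# of that rewrite step (helper name illustrative):
#
#     from collections import OrderedDict
#     def remap_state_dict(weights, key_pairs):
#         # key_pairs holds (new_key, old_key) tuples, as in list_of_state_dict above
#         return OrderedDict((new_key, weights[old_key]) for new_key, old_key in key_pairs)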
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    '''simple docstring'''
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main( ):
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("Sorted List\n" )
    print(*arr )
if __name__ == "__main__":
main()
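# For contrast with the multiprocessing version above, the same odd-even transposition
# sort in a single process (a sketch; n phases guarantee a sorted result):
def odd_even_transposition_single_process(arr: list) -> list:
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):  # alternate even/odd pairs each phase
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
assert odd_even_transposition_single_process([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) == list(range(1, 11))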
'''simple docstring'''
def speed_of_sound_in_a_fluid( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
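# Worked example with rough textbook values for water (bulk modulus ~2.15 GPa,
# density ~1000 kg/m^3; both approximate). The function name above is its
# conventional one, restored in the definition:
assert round(speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.15e9), 1) == 1466.3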
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( _UpperCAmelCase , unittest.TestCase):
_lowerCamelCase : List[Any] = GPTSanJapaneseTokenizer
_lowerCamelCase : Dict = False
_lowerCamelCase : str = {"do_clean_text": False, "add_prefix_space": False}
def lowercase_ ( self : str ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
UpperCamelCase__ = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file, "w" ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCAmelCase ) )
def lowercase_ ( self : Tuple, **a_ : Any ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def lowercase_ ( self : Optional[int], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
UpperCamelCase__ = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def lowercase_ ( self : Dict, a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_input_output_texts(_UpperCAmelCase )
UpperCamelCase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
UpperCamelCase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def lowercase_ ( self : Any ):
"""simple docstring"""
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCamelCase__ = '''こんにちは、世界。 こんばんは、㔺界。'''
UpperCamelCase__ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
UpperCamelCase__ = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids without special tokens
UpperCamelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
# Testing conversion to ids with special tokens
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCamelCase__ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
UpperCamelCase__ = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
UpperCamelCase__ = tokenizer.encode(_UpperCAmelCase )
UpperCamelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@slow
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCamelCase__ = '''こんにちは、世界。'''
UpperCamelCase__ = '''こんばんは、㔺界。😀'''
UpperCamelCase__ = '''こんにちは、世界。こんばんは、世界。😀'''
UpperCamelCase__ = tokenizer.encode(prefix_text + input_text )
UpperCamelCase__ = tokenizer.encode("", prefix_text=prefix_text + input_text )
UpperCamelCase__ = tokenizer.encode(_UpperCAmelCase, prefix_text=_UpperCAmelCase )
UpperCamelCase__ = tokenizer.decode(_UpperCAmelCase )
UpperCamelCase__ = tokenizer.decode(_UpperCAmelCase )
UpperCamelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCamelCase__ = '''こんにちは、世界。'''
UpperCamelCase__ = '''こんばんは、㔺界。😀'''
UpperCamelCase__ = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCamelCase__ = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCamelCase__ = [1] + [0] * (len_prefix + len_text + 1)
UpperCamelCase__ = [1] * (len_prefix + len_text + 1) + [0]
UpperCamelCase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCamelCase__ = tokenizer(prefix_text + input_text ).token_type_ids
UpperCamelCase__ = tokenizer("", prefix_text=prefix_text + input_text ).token_type_ids
UpperCamelCase__ = tokenizer(_UpperCAmelCase, prefix_text=_UpperCAmelCase ).token_type_ids
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
@slow
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCamelCase__ = tokenizer.encode("あンいワ" )
UpperCamelCase__ = tokenizer.encode("", prefix_text="あンいワ" )
UpperCamelCase__ = tokenizer.encode("いワ", prefix_text="あン" )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ), tokenizer.decode(_UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ), tokenizer.decode(_UpperCAmelCase ) )
self.assertNotEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase, _UpperCAmelCase )
        self.assertEqual(x_token_2[1], x_token_2[-1] ) # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3] ) # SEG token
@slow
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCamelCase__ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
UpperCamelCase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase )
UpperCamelCase__ = tokenizer.batch_encode_plus(_UpperCAmelCase, padding=_UpperCAmelCase )
# fmt: off
UpperCamelCase__ = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
UpperCamelCase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCamelCase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids, _UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids, _UpperCAmelCase )
self.assertListEqual(x_token.attention_mask, _UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids, _UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids, _UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask, _UpperCAmelCase )
def lowercase_ ( self : Any ):
"""simple docstring"""
pass
def lowercase_ ( self : int ):
"""simple docstring"""
pass
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    UpperCamelCase__ = int(nth_term )
    UpperCamelCase__ = int(power )
    UpperCamelCase__ = []
    for temp in range(int(nth_term ) ):
        series.append(F'1 / {pow(temp + 1 , int(power ) )}' if series else "1" )
return series
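# Added illustration (not in the original file): a quick sanity check of the
# fixed function above (its mangled name stands in for `p_series`).
assert SCREAMING_SNAKE_CASE__(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]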
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__lowercase: Optional[int] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__lowercase: Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__lowercase: List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__lowercase: Any = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
__lowercase: Tuple = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
    # fit_generator is removed in recent TensorFlow releases; fit accepts generators directly
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save("cnn.h5")
# Part 3 - Making new predictions
__lowercase: List[str] = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
__lowercase: Dict = tf.keras.preprocessing.image.img_to_array(test_image)
__lowercase: int = np.expand_dims(test_image, axis=0)
__lowercase: Any = classifier.predict(test_image)
# training_set.class_indices
    # The sigmoid head outputs a probability in (0, 1); compare against a 0.5
    # threshold instead of testing for exact equality with 0 or 1.
    if result[0][0] >= 0.5:
        __lowercase: Dict = "Abnormality detected"
    else:
        __lowercase: Union[str, Any] = "Normal"
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
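# Usage sketch (added for illustration, not part of the original): aligning the
# task template above with a dataset's ClassLabel feature. The class and method
# names are mangled in this dump; upstream `datasets` exposes them as
# AudioClassification and align_with_features — names assumed, verify against
# your installed version.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["neg", "pos"])})
#   template = AudioClassification(label_column="labels")
#   aligned = template.align_with_features(features)  # label schema now carries the names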
| 31
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase: List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase):
_lowerCamelCase : Optional[int] = XLMRobertaTokenizer
_lowerCamelCase : Tuple = XLMRobertaTokenizerFast
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[Any] = True
def lowercase_ ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ = XLMRobertaTokenizer(_lowerCamelCase, keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = '''<pad>'''
UpperCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(vocab_keys[-1], "<mask>" )
self.assertEqual(len(_lowerCamelCase ), 1002 )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size, 1002 )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = XLMRobertaTokenizer(_lowerCamelCase, keep_accents=_lowerCamelCase )
UpperCamelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_lowerCamelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
UpperCamelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowerCamelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        UpperCamelCase__ = [(self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase )
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(_lowerCamelCase )
UpperCamelCase__ = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCamelCase__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCamelCase__ = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase )
UpperCamelCase__ = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCamelCase__ = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase )
UpperCamelCase__ = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCamelCase__ = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@cached_property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowerCamelCase, f.name )
UpperCamelCase__ = XLMRobertaTokenizer(f.name, keep_accents=_lowerCamelCase )
UpperCamelCase__ = pickle.dumps(_lowerCamelCase )
pickle.loads(_lowerCamelCase )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = '''I was born in 92000, and this is falsé.'''
UpperCamelCase__ = tokenizer.tokenize(_lowerCamelCase )
UpperCamelCase__ = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
UpperCamelCase__ = tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
UpperCamelCase__ = rust_tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(_lowerCamelCase )
UpperCamelCase__ = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = '''Hello World!'''
UpperCamelCase__ = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowerCamelCase, self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCamelCase__ = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowerCamelCase, self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = {'''input_ids''': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    _lowerCamelCase : Dict = Speech2TextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
        UpperCamelCase__ = Speech2TextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float64 )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def SCREAMING_SNAKE_CASE__( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=__a , default=__a , required=__a , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=__a , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=__a , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=__a , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=__a , default=0 , help="cuda_id." , )
UpperCamelCase__ = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if not len(__a ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
UpperCamelCase__ = imgs[0].size
UpperCamelCase__ = Image.new("RGB" , size=(cols * w, rows * h) )
UpperCamelCase__ = grid.size
for i, img in enumerate(__a ):
grid.paste(__a , box=(i % cols * w, i // cols * h) )
return grid
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any]="robotic cat with wings" , _UpperCamelCase : Tuple=7.5 , _UpperCamelCase : Optional[Any]=50 , _UpperCamelCase : Dict=1 , _UpperCamelCase : Union[str, Any]=42 , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = torch.Generator(pipeline.device ).manual_seed(__a )
UpperCamelCase__ = pipeline(
__a , guidance_scale=__a , num_inference_steps=__a , generator=__a , num_images_per_prompt=__a , ).images
UpperCamelCase__ = int(math.sqrt(__a ) )
UpperCamelCase__ = image_grid(__a , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
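# Note (added for illustration): generate_images lays the outputs on a
# near-square grid — e.g. num_images_per_prompt=4 gives
# _rows = int(math.sqrt(4)) = 2, so image_grid produces a 2x2 grid.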
__lowercase: Optional[int] = parse_args()
# Load models and create wrapper for stable diffusion
__lowercase: Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
__lowercase: List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
__lowercase: Dict = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
__lowercase: Union[str, Any] = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
__lowercase: Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
__lowercase: List[str] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
__lowercase: Dict = unet.to(torch.device("cuda", args.cuda_id))
__lowercase: Union[str, Any] = pipeline.to(unet.device)
__lowercase ,__lowercase: Tuple = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
__lowercase: Optional[int] = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
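# Minimal usage sketch (added for illustration): the functions above expect
# `features` with variables as rows and samples as columns, e.g.
#
#   toy = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])
#   projected = principal_component_analysis(toy, dimensions=1)  # shape (1, 4)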
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__lowercase: List[str] = """src/diffusers"""
# Matches is_xxx_available()
__lowercase: int = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
__lowercase: List[str] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
__lowercase: Optional[Any] = """
{0} = None
"""
__lowercase: List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
__lowercase: Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = _re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def SCREAMING_SNAKE_CASE__( ) -> Optional[int]:
'''simple docstring'''
with open(os.path.join(__snake_case , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
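# Illustration (added, not in the original): create_dummy_object("UNet2DModel",
# '["torch"]') selects the DUMMY_CLASS template (mixed-case name), producing a
# placeholder class whose __init__, from_config and from_pretrained all call
# requires_backends(..., ["torch"]) and so raise a helpful error when torch is
# not installed.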
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple=None ) -> List[str]:
'''simple docstring'''
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = "[" + ", ".join(F'"{b}"' for b in backend.split("_and_" ) ) + "]"
UpperCamelCase__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(__snake_case , "utils" )
UpperCamelCase__ = {
backend: os.path.join(__snake_case , F'dummy_{short_names.get(__snake_case , __snake_case )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F'diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '
"to fix this." )
if __name__ == "__main__":
__lowercase: Tuple = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowercase: Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = 10
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = [1, 2, 3, 4]
UpperCamelCase__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case, self.block_size, 0 ), _snake_case )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case, self.block_size, 0 ), _snake_case )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case, self.block_size, 0 ), _snake_case )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
UpperCamelCase__ , UpperCamelCase__ = process_story(_snake_case )
self.assertEqual(_snake_case, [] )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = ""
UpperCamelCase__ , UpperCamelCase__ = process_story(_snake_case )
self.assertEqual(_snake_case, [] )
self.assertEqual(_snake_case, [] )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
UpperCamelCase__ , UpperCamelCase__ = process_story(_snake_case )
UpperCamelCase__ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(_snake_case, _snake_case )
UpperCamelCase__ = ["It was the best of times."]
self.assertEqual(_snake_case, _snake_case )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = torch.tensor([1, 2, 3, 4] )
UpperCamelCase__ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_snake_case, 0 ).numpy(), expected.numpy() )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_snake_case, 23 ).numpy(), expected.numpy() )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_snake_case, 1 ).numpy(), expected.numpy() )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = 101
UpperCamelCase__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCamelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCamelCase__ = compute_token_type_ids(_snake_case, _snake_case )
np.testing.assert_array_equal(_snake_case, _snake_case )
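    # Note (added for illustration): build_mask marks real tokens with 1 and the
    # given pad token with 0, while compute_token_type_ids flips the segment id
    # at each separator token (101 in the last test), alternating 1/0 blocks.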
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
text = "lower newer"
bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens, bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
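    # Hedged walk-through of the merges applied above ("l o", "lo w</w>",
    # "e r</w>" from setUp, in priority order): "lower" starts as
    # l o w e r</w>; "l o" -> "lo"; "lo w</w>" never fires because this "w"
    # is not word-final; "e r</w>" -> "er</w>"; nothing else matches, leaving
    # ["lo", "w", "er</w>"] exactly as asserted.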
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
text_tokenized_s = tokenizer_s.tokenize(text )
text_tokenized_r = tokenizer_r.tokenize(text )
self.assertListEqual(text_tokenized_s, text_tokenized_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
text = "xa\u0303y" + " " + "x\xe3y"
text_tokenized_s = tokenizer_s.tokenize(text )
text_tokenized_r = tokenizer_r.tokenize(text )
self.assertListEqual(text_tokenized_s, text_tokenized_r )
# Test that the tokenization is identical on unicode of space type
spaces_unicodes = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(text_tokenized_s, text_tokenized_r )
# Test that the tokenization is identical on unicode of line break type
line_break_unicodes = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
self.assertListEqual(text_tokenized_s, text_tokenized_r )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
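    # Hedged reading of the offsets asserted above: for "hello hello",
    # return_offsets_mapping yields the character spans (0, 5) and (6, 11);
    # a leading space shifts both spans right by one, to (1, 6) and (7, 12).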
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase: Optional[int] = logging.get_logger(__name__)
__lowercase: List[Any] = '▁'
__lowercase: Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
__lowercase: Optional[int] = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
__lowercase: Any = {
'facebook/xglm-564M': 2_048,
}
class UpperCAmelCase ( lowercase__):
_lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self : Dict, vocab_file : str, bos_token : Optional[Any]="<s>", eos_token : List[Any]="</s>", sep_token : List[Any]="</s>", cls_token : Optional[Any]="<s>", unk_token : List[Any]="<unk>", pad_token : Optional[int]="<pad>", sp_model_kwargs : Optional[Dict[str, Any]] = None, **kwargs : List[Any], ):
    """simple docstring"""
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    # Compatibility with the original tokenizer
    self.num_madeup_words = 7
    madeup_words = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
    kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [] )
    kwargs["additional_special_tokens"] += [
        word for word in madeup_words if word not in kwargs["additional_special_tokens"]
    ]
    super().__init__(
        bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(str(vocab_file ) )
    self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
sp_size = len(self.sp_model )
madeup_words = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(madeup_words )
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ):
"""simple docstring"""
state = self.__dict__.copy()
state["sp_model"] = None
state["sp_model_proto"] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str], d : Dict ):
    """simple docstring"""
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self, "sp_model_kwargs" ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase_ ( self : Optional[int], token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None ):
    """simple docstring"""
    if token_ids_1 is None:
        return [self.sep_token_id] + token_ids_0
    sep = [self.sep_token_id]
    return sep + token_ids_0 + sep + sep + token_ids_1
def lowercase_ ( self : Dict, token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None, already_has_special_tokens : bool = False ):
    """simple docstring"""
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0 ))
    return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
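    # e.g. token_ids_0 of length 2 and token_ids_1 of length 3 yield
    # [1, 0, 0, 1, 1, 0, 0, 0]: 1 marks the special tokens added around the
    # sequences, 0 marks ordinary sequence tokens.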
def lowercase_ ( self : List[str], token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None ):
    """simple docstring"""
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return len(sep + token_ids_0 ) * [0]
    return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowercase_ ( self : Dict ):
"""simple docstring"""
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : int, text : str ):
    """simple docstring"""
    return self.sp_model.encode(text, out_type=str )
def lowercase_ ( self : Dict, token : Any ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self : Tuple, index : Union[str, Any] ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self : Optional[int], tokens : List[str] ):
    """simple docstring"""
    out_string = "".join(tokens ).replace("▁", " " ).strip()
    return out_string
def lowercase_ ( self : Optional[int], save_directory : str, filename_prefix : Optional[str] = None ):
    """simple docstring"""
    if not os.path.isdir(save_directory ):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file, out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file, "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
return (out_vocab_file,)
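# Hedged illustration of the fairseq/spm id alignment documented in __init__
# above: with fairseq_offset = 1 a sentencepiece piece id is shifted up by one,
# keeping ids 0-3 reserved for <s>, <pad>, </s>, <unk>, while spm's own <unk>
# (piece id 0) is redirected to the fairseq unk id (3).
def spm_to_fairseq_id_sketch( spm_id : int , fairseq_offset : int = 1 , unk_id : int = 3 ) -> int:
    # hypothetical standalone helper mirroring the token-to-id conversion above
    return spm_id + fairseq_offset if spm_id else unk_id

assert spm_to_fairseq_id_sketch(3 ) == 4  # spm "," (3) -> fairseq "," (4)
assert spm_to_fairseq_id_sketch(0 ) == 3  # spm <unk> (0) -> fairseq <unk> (3)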
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample( wav : np.ndarray , max_length : float , sample_rate : int = 16_000 ) -> np.ndarray:
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
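# Hedged usage sketch with illustrative values (not part of the training flow):
# crop a 3 s mono clip sampled at 16 kHz down to a random 2 s window.
_demo_wav = np.zeros(3 * 16_000 )
assert random_subsample(_demo_wav , max_length=2.0 ).shape == (2 * 16_000,)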
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
    "The argument `--freeze_feature_extractor` is deprecated and "
    "will be removed in a future version. Use `--freeze_feature_encoder` "
    "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def main() -> Optional[Any]:
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
raw_datasets = DatasetDict()
raw_datasets["train"] = load_dataset(
    data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
raw_datasets["eval"] = load_dataset(
    data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
model_input_name = feature_extractor.model_input_names[0]
def train_transforms(batch ):
    subsampled_wavs = []
    for audio in batch[data_args.audio_column_name]:
        wav = random_subsample(
            audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
        subsampled_wavs.append(wav )
    inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
    output_batch = {model_input_name: inputs.get(model_input_name )}
    output_batch["labels"] = list(batch[data_args.label_column_name] )
    return output_batch
def val_transforms(batch ):
    wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
    inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
    output_batch = {model_input_name: inputs.get(model_input_name )}
    output_batch["labels"] = list(batch[data_args.label_column_name] )
    return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(eval_pred ):
    predictions = np.argmax(eval_pred.predictions , axis=1 )
    return metric.compute(predictions=predictions , references=eval_pred.label_ids )
config = AutoConfig.from_pretrained(
    model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
model = AutoModelForAudioClassification.from_pretrained(
    model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
    if data_args.max_train_samples is not None:
        raw_datasets["train"] = (
            raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        )
    # Set the training transforms
    raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
if training_args.do_eval:
    if data_args.max_eval_samples is not None:
        raw_datasets["eval"] = (
            raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
        )
    # Set the validation transforms
    raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
trainer = Trainer(
    model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint )
    trainer.save_model()
    trainer.log_metrics("train" , train_result.metrics )
    trainer.save_metrics("train" , train_result.metrics )
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    metrics = trainer.evaluate()
    trainer.log_metrics("eval" , metrics )
    trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs )
else:
    trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
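# Hedged example invocation (dataset and hyper-parameters are illustrative,
# mirroring the audio-classification example docs, not mandated by this file):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --learning_rate 3e-5 --max_length_seconds 1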
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
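# Worked example with assumed handbook values for water: a bulk modulus of
# roughly 2.2e9 Pa and a density of roughly 998 kg/m^3 give
# (2.2e9 / 998) ** 0.5 ≈ 1485 m/s, close to the measured speed of sound in water.
assert 1_400 < SCREAMING_SNAKE_CASE__(998, 2.2e9 ) < 1_600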
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
'''simple docstring'''
import math
import sys
def read_file_binary( file_path : str ) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            # render each byte as a zero-padded 8-bit string
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data( data_bits : str ) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        # the next code derived from this entry ends in "0"
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            # code width grows by one bit: left-pad every existing key with "0"
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary( file_path : str , to_write : str ) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                # data already byte-aligned: append a pure padding byte
                result_byte_array.append("10000000" )
            else:
                # pad the last byte with a "1" marker followed by zeros
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def remove_prefix( data_bits : str ) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def decompress( source_path : str , destination_path : str ) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
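# Hedged sanity check on a hand-built stream: with the initial lexicon
# {"0": "0", "1": "1"}, the single code "0" decodes back to "0" (after which
# the lexicon is rebuilt with zero-padded keys).
assert decompress_data("0" ) == "0"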
if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : Dict = MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf" )
UpperCamelCase__ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 3_8015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 2_5506, "token_str": " accuser"},
], )
UpperCamelCase__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 3_8015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 2_5506,
"token_str": " accuser",
},
], )
UpperCamelCase__ = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3 )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
], )
@require_torch
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt" )
UpperCamelCase__ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 3_5676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS"},
], )
UpperCamelCase__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 3_5676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS"},
], )
UpperCamelCase__ = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3 )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3606, "token_str": " Clara"},
], )
UpperCamelCase__ = unmasker("My name is <mask> <mask>", top_k=2 )
self.assertEqual(
nested_simplify(snake_case__, decimals=6 ), [
[
{
"score": 2.2e-0_5,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
], )
@require_torch_gpu
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt" )
# convert model to fp16
pipe.model.half()
UpperCamelCase__ = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(snake_case__, snake_case__ )
@slow
@require_torch
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt" )
self.run_large_test(snake_case__ )
@slow
@require_tf
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf" )
self.run_large_test(snake_case__ )
def lowercase_ ( self : Union[str, Any], unmasker : List[str] ):
"""simple docstring"""
UpperCamelCase__ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ ), [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
], )
UpperCamelCase__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ ), [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 1_2790,
"token_str": " Lyon",
},
], )
UpperCamelCase__ = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3 )
self.assertEqual(
nested_simplify(snake_case__ ), [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
], )
@require_torch
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(snake_case__, [] )
@require_tf
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(snake_case__, [] )
def lowercase_ ( self : Optional[Any], model : int, tokenizer : List[Any], processor : Optional[Any] ):
    """simple docstring"""
    if tokenizer is None or tokenizer.mask_token_id is None:
        self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer )
    examples = [
        f'This is another {tokenizer.mask_token} test',
    ]
    return fill_masker, examples
def lowercase_ ( self : str, fill_masker : Optional[int], examples : Dict ):
    """simple docstring"""
    tokenizer = fill_masker.tokenizer
    model = fill_masker.model
UpperCamelCase__ = fill_masker(
f'This is a {tokenizer.mask_token}', )
self.assertEqual(
snake_case__, [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
], )
UpperCamelCase__ = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
snake_case__, [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
], )
UpperCamelCase__ = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
snake_case__, [
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
], )
with self.assertRaises(snake_case__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(snake_case__ ):
fill_masker("This is" )
self.run_test_top_k(snake_case__, snake_case__ )
self.run_test_targets(snake_case__, snake_case__ )
self.run_test_top_k_targets(snake_case__, snake_case__ )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case__, snake_case__ )
self.fill_mask_with_multiple_masks(snake_case__, snake_case__ )
def lowercase_ ( self : Union[str, Any], model : Optional[Any], tokenizer : int ):
"""simple docstring"""
vocab = tokenizer.get_vocab()
targets = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__, targets=snake_case__ )
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
snake_case__, [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
], )
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs}, snake_case__ )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs}, set(snake_case__ ) )
# Call argument
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__ )
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', targets=snake_case__ )
self.assertEqual(
snake_case__, [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
], )
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs}, snake_case__ )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs}, set(snake_case__ ) )
# Score equivalence
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', targets=snake_case__ )
UpperCamelCase__ = [top_mask["token_str"] for top_mask in outputs]
UpperCamelCase__ = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ) == set(snake_case__ ):
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', targets=snake_case__ )
UpperCamelCase__ = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case__ ), nested_simplify(snake_case__ ) )
# Raises with invalid
with self.assertRaises(snake_case__ ):
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case__ ):
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', targets=[""] )
with self.assertRaises(snake_case__ ):
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', targets="" )
def lowercase_ ( self : Any, model : Union[str, Any], tokenizer : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__, top_k=2 )
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
snake_case__, [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
], )
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__ )
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', top_k=2 )
self.assertEqual(
snake_case__, [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
], )
self.assertEqual(nested_simplify(snake_case__ ), nested_simplify(snake_case__ ) )
def lowercase_ ( self : Any, model : Union[str, Any], tokenizer : int ):
"""simple docstring"""
vocab = tokenizer.get_vocab()
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__ )
# top_k=2, ntargets=3
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', top_k=2, targets=snake_case__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCamelCase__ = [el["token_str"] for el in sorted(snake_case__, key=lambda a_ : x["score"], reverse=snake_case__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ).issubset(snake_case__ ):
UpperCamelCase__ = fill_masker(f'This is a {tokenizer.mask_token}', top_k=3, targets=snake_case__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case__ ), nested_simplify(snake_case__ ) )
def lowercase_ ( self : Tuple, model : str, tokenizer : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__ )
vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
targets = sorted(vocab.keys() )[:3]
UpperCamelCase__ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase__ = fill_masker(f'My name is {tokenizer.mask_token}', targets=snake_case__, top_k=10 )
# The target list contains duplicates, so we can't output more
# results than the number of unique targets
self.assertEqual(len(snake_case__ ), 3 )
def lowercase_ ( self : Optional[int], model : Tuple, tokenizer : str ):
"""simple docstring"""
UpperCamelCase__ = FillMaskPipeline(model=snake_case__, tokenizer=snake_case__ )
UpperCamelCase__ = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}', top_k=2 )
self.assertEqual(
snake_case__, [
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
], )
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
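# Hedged usage sketch (the checkpoint name is an assumption, not taken from
# this file): a pipeline of this shape can be loaded and sampled roughly like
#   pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]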
| 31
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: Optional[int] = logging.get_logger(__name__)
__lowercase: Tuple = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE):
_lowerCamelCase = 'time_series_transformer'
_lowerCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Union[str, Any], a_ : int = None, a_ : Dict = None, a_ : Union[str, Any] = "student_t", a_ : Optional[Any] = "nll", a_ : Optional[int] = 1, a_ : Tuple = [1, 2, 3, 4, 5, 6, 7], a_ : Any = "mean", a_ : Union[str, Any] = 0, a_ : str = 0, a_ : Dict = 0, a_ : Tuple = 0, a_ : int = None, a_ : Tuple = None, a_ : str = 32, a_ : str = 32, a_ : List[str] = 2, a_ : List[str] = 2, a_ : Dict = 2, a_ : Any = 2, a_ : Optional[Any] = True, a_ : Dict = "gelu", a_ : str = 64, a_ : int = 0.1, a_ : str = 0.1, a_ : int = 0.1, a_ : Union[str, Any] = 0.1, a_ : List[Any] = 0.1, a_ : Tuple = 100, a_ : List[Any] = 0.02, a_ : Optional[Any]=True, **a_ : Dict, ):
"""simple docstring"""
UpperCamelCase__ = prediction_length
UpperCamelCase__ = context_length or prediction_length
UpperCamelCase__ = distribution_output
UpperCamelCase__ = loss
UpperCamelCase__ = input_size
UpperCamelCase__ = num_time_features
UpperCamelCase__ = lags_sequence
UpperCamelCase__ = scaling
UpperCamelCase__ = num_dynamic_real_features
UpperCamelCase__ = num_static_real_features
UpperCamelCase__ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
UpperCamelCase__ = cardinality
else:
UpperCamelCase__ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
UpperCamelCase__ = embedding_dimension
else:
UpperCamelCase__ = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase__ = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase__ = input_size * len(_a ) + self._number_of_features
UpperCamelCase__ = d_model
UpperCamelCase__ = encoder_attention_heads
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = encoder_ffn_dim
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = activation_function
UpperCamelCase__ = init_std
UpperCamelCase__ = use_cache
super().__init__(is_encoder_decoder=_a, **_a )
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
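# Worked example of the arithmetic above using the constructor defaults:
# input_size=1 and lags_sequence=[1, ..., 7] contribute input_size * len(lags_sequence) = 7,
# while _number_of_features = sum(embedding_dimension) (0) + num_dynamic_real_features (0)
# + num_time_features (0) + num_static_real_features (0) + input_size * 2 (2) = 2,
# so feature_size = 7 + 2 = 9.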
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status( target_runners : list , token : str ):
    '''simple docstring'''
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str( values : str ):
        '''simple docstring'''
        return values.split("," )
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'data2vec-audio'
def __init__( self : Tuple, a_ : Optional[int]=32, a_ : Optional[int]=768, a_ : Optional[Any]=12, a_ : List[str]=12, a_ : Any=3072, a_ : Union[str, Any]="gelu", a_ : str=0.1, a_ : int=0.1, a_ : str=0.1, a_ : str=0.0, a_ : Dict=0.1, a_ : Tuple=0.1, a_ : Dict=0.02, a_ : Tuple=1e-5, a_ : Dict="gelu", a_ : str=(512, 512, 512, 512, 512, 512, 512), a_ : List[Any]=(5, 2, 2, 2, 2, 2, 2), a_ : Any=(10, 3, 3, 3, 3, 2, 2), a_ : List[str]=False, a_ : Tuple=16, a_ : List[str]=19, a_ : int=5, a_ : Any=0.05, a_ : Dict=10, a_ : List[Any]=2, a_ : Any=0.0, a_ : int=10, a_ : str=0, a_ : Any="sum", a_ : Dict=False, a_ : Union[str, Any]=False, a_ : Tuple=256, a_ : Tuple=(512, 512, 512, 512, 1500), a_ : int=(5, 3, 3, 1, 1), a_ : Optional[Any]=(1, 2, 3, 1, 1), a_ : Union[str, Any]=512, a_ : Dict=0, a_ : str=1, a_ : List[str]=2, a_ : List[Any]=False, a_ : Dict=3, a_ : Optional[Any]=2, a_ : Optional[int]=3, a_ : Union[str, Any]=None, **a_ : Union[str, Any], ):
"""simple docstring"""
super().__init__(**a_, pad_token_id=a_, bos_token_id=a_, eos_token_id=a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = feat_extract_activation
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = conv_bias
UpperCamelCase__ = num_conv_pos_embeddings
UpperCamelCase__ = num_conv_pos_embedding_groups
UpperCamelCase__ = conv_pos_kernel_size
UpperCamelCase__ = len(self.conv_dim )
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = feat_proj_dropout
UpperCamelCase__ = final_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = vocab_size
UpperCamelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
# ctc loss
UpperCamelCase__ = ctc_loss_reduction
UpperCamelCase__ = ctc_zero_infinity
# adapter
UpperCamelCase__ = add_adapter
UpperCamelCase__ = adapter_kernel_size
UpperCamelCase__ = adapter_stride
UpperCamelCase__ = num_adapter_layers
UpperCamelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = xvector_output_dim
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return math.prod(self.conv_stride )
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
                F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase):
_lowerCamelCase : Optional[Any] = IFInpaintingPipeline
_lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_lowerCamelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return self._get_dummy_components()
def lowercase_ ( self : Any, a_ : str, a_ : str=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
UpperCamelCase__ = torch.manual_seed(a_ )
else:
UpperCamelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase_ ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA" )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self._test_save_load_local()
def lowercase_ ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
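# A minimal, self-contained sketch of the same optimal-merge-pattern idea
# using conventional names; a heap keeps the two cheapest files O(log n) to
# find instead of the O(n) `min` scan above.
import heapq


def optimal_merge_cost_heap(sizes: list) -> int:
    heap = list(sizes)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total


assert optimal_merge_cost_heap([2, 3, 4]) == 14  # (2+3)=5, then (5+4)=9; 5+9=14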
| 31
| 0
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def SCREAMING_SNAKE_CASE__( ) -> tuple[list[int], int]:
'''simple docstring'''
UpperCamelCase__ = [randint(-10_00 , 10_00 ) for i in range(10 )]
UpperCamelCase__ = randint(-50_00 , 50_00 )
return (arr, r)
__lowercase: List[str] = make_dataset()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> tuple[int, ...]:
'''simple docstring'''
for triplet in permutations(a_ , 3 ):
if sum(a_ ) == target:
return tuple(sorted(a_ ) )
return (0, 0, 0)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> tuple[int, int, int]:
'''simple docstring'''
arr.sort()
UpperCamelCase__ = len(a_ )
for i in range(n - 1 ):
UpperCamelCase__ , UpperCamelCase__ = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def SCREAMING_SNAKE_CASE__( ) -> tuple[float, float]:
'''simple docstring'''
UpperCamelCase__ = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
UpperCamelCase__ = "\ntriplet_sum1(*dataset)\n"
UpperCamelCase__ = "\ntriplet_sum2(*dataset)\n"
UpperCamelCase__ = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_00_00 )
UpperCamelCase__ = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_00_00 )
return (min(a_ ), min(a_ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowercase: Optional[Any] = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
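# A compact restatement of the sort + two-pointer strategy above with
# conventional names; the array and target are illustrative.
def triplet_sum_two_pointer(arr: list, target: int) -> tuple:
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)


assert triplet_sum_two_pointer([13, 29, 7, 23, 5], 35) == (5, 7, 23)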
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
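# A self-contained sketch of the same state-space-tree backtracking with
# conventional names, cross-checked against the standard library.
from itertools import permutations as _it_permutations


def all_permutations(seq: list) -> list:
    result = []

    def backtrack(current: list, used: list) -> None:
        if len(current) == len(seq):
            result.append(current.copy())
            return
        for i in range(len(seq)):
            if not used[i]:
                used[i] = True
                current.append(seq[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(seq))
    return result


assert sorted(map(tuple, all_permutations([3, 1, 2]))) == sorted(_it_permutations([3, 1, 2]))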
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> int:
'''simple docstring'''
    if not isinstance(__a , int ):
UpperCamelCase__ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
UpperCamelCase__ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
UpperCamelCase__ = 1
for i in range(1 , __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
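# The loop above applies the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1);
# with this snippet's 1-based indexing an input of 5 yields 14. A quick check:
def _catalan(number: int) -> int:
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)
    return current


assert [_catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]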
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
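# A hedged instantiation sketch: in the public transformers API this config
# is `YolosConfig`, consumed by e.g. `YolosForObjectDetection`.
#
# from transformers import YolosConfig, YolosForObjectDetection
# configuration = YolosConfig()
# model = YolosForObjectDetection(configuration)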
| 31
| 0
|
'''simple docstring'''
__lowercase: Optional[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowercase: Optional[int] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowercase: Dict = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
assert len(str(lowerCAmelCase__ ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
UpperCamelCase__ = year // 1_00
UpperCamelCase__ = (5 * (century % 4) + 2) % 7
UpperCamelCase__ = year % 1_00
UpperCamelCase__ = centurian % 12
UpperCamelCase__ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCamelCase__ = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCamelCase__ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
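# A spot check of the doomsday computation against the standard library,
# using the same Sunday-first naming as the table above; the date is
# illustrative.
import datetime

_NAMES = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]


def _weekday_via_stdlib(year: int, month: int, day: int) -> str:
    # date.weekday(): Monday == 0 ... Sunday == 6; shift so Sunday == 0.
    return _NAMES[(datetime.date(year, month, day).weekday() + 1) % 7]


assert _weekday_via_stdlib(2000, 1, 1) == "Saturday"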
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
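# In practice this conversion path is reached through the high-level API
# rather than called directly; a hedged sketch (checkpoint name illustrative):
#
# from transformers import FlaxBertModel
# model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)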
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = '''rwkv'''
_lowerCamelCase : Tuple = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : Union[str, Any], a_ : List[str]=5_0277, a_ : Optional[int]=1024, a_ : Any=4096, a_ : Dict=32, a_ : Union[str, Any]=None, a_ : str=None, a_ : Dict=1e-5, a_ : List[Any]=0, a_ : List[str]=0, a_ : int=6, a_ : Any=False, a_ : List[str]=True, **a_ : int, ):
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = context_length
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCamelCase__ = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCamelCase__ = layer_norm_epsilon
UpperCamelCase__ = rescale_every
UpperCamelCase__ = use_cache
UpperCamelCase__ = bos_token_id
UpperCamelCase__ = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__ )
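# A hedged instantiation sketch: the public transformers name for this config
# is `RwkvConfig`, consumed by `RwkvModel`.
#
# from transformers import RwkvConfig, RwkvModel
# configuration = RwkvConfig()
# model = RwkvModel(configuration)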
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
        # If doubling of a number results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
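# A compact restatement of the Luhn doubling step with conventional names;
# the test number is the standard "4111..." sample and carries no real
# account.
def _luhn_ok(number: str) -> bool:
    digits = [int(c) for c in number]
    # Double every second digit from the right; a two-digit product n becomes
    # n - 9, which equals the sum of its digits.
    for i in range(len(digits) - 2, -1, -2):
        doubled = digits[i] * 2
        digits[i] = doubled - 9 if doubled > 9 else doubled
    return sum(digits) % 10 == 0


assert _luhn_ok("4111111111111111")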
| 31
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = 1
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
@property
def lowercase_ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
return model
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
return model
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = RobertaSeriesConfig(
hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
return RobertaSeriesModelWithTransformation(_SCREAMING_SNAKE_CASE )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
def extract(*a_ : Optional[int], **a_ : Optional[int] ):
class UpperCAmelCase :
def __init__( self : str ):
"""simple docstring"""
UpperCamelCase__ = torch.ones([0] )
def lowercase_ ( self : int, a_ : Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(_SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.dummy_cond_unet
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = self.dummy_vae
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCamelCase__ = 77
UpperCamelCase__ = self.dummy_image.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCamelCase__ = AltDiffusionImgaImgPipeline(
unet=_SCREAMING_SNAKE_CASE, scheduler=_SCREAMING_SNAKE_CASE, vae=_SCREAMING_SNAKE_CASE, text_encoder=_SCREAMING_SNAKE_CASE, tokenizer=_SCREAMING_SNAKE_CASE, safety_checker=_SCREAMING_SNAKE_CASE, feature_extractor=self.dummy_extractor, )
UpperCamelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = alt_pipe.to(_SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = "A painting of a squirrel eating a burger"
UpperCamelCase__ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase__ = alt_pipe(
[prompt], generator=_SCREAMING_SNAKE_CASE, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=_SCREAMING_SNAKE_CASE, )
UpperCamelCase__ = output.images
UpperCamelCase__ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase__ = alt_pipe(
[prompt], generator=_SCREAMING_SNAKE_CASE, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=_SCREAMING_SNAKE_CASE, return_dict=_SCREAMING_SNAKE_CASE, )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.dummy_cond_unet
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = self.dummy_vae
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCamelCase__ = 77
UpperCamelCase__ = self.dummy_image.to(_SCREAMING_SNAKE_CASE )
# put models in fp16
UpperCamelCase__ = unet.half()
UpperCamelCase__ = vae.half()
UpperCamelCase__ = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ = AltDiffusionImgaImgPipeline(
unet=_SCREAMING_SNAKE_CASE, scheduler=_SCREAMING_SNAKE_CASE, vae=_SCREAMING_SNAKE_CASE, text_encoder=_SCREAMING_SNAKE_CASE, tokenizer=_SCREAMING_SNAKE_CASE, safety_checker=_SCREAMING_SNAKE_CASE, feature_extractor=self.dummy_extractor, )
UpperCamelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = alt_pipe.to(_SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = "A painting of a squirrel eating a burger"
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = alt_pipe(
[prompt], generator=_SCREAMING_SNAKE_CASE, num_inference_steps=2, output_type="np", image=_SCREAMING_SNAKE_CASE, ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ = init_image.resize((760, 504) )
UpperCamelCase__ = "BAAI/AltDiffusion"
UpperCamelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE, safety_checker=_SCREAMING_SNAKE_CASE, )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
UpperCamelCase__ = "A fantasy landscape, trending on artstation"
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = pipe(
prompt=_SCREAMING_SNAKE_CASE, image=_SCREAMING_SNAKE_CASE, strength=0.75, guidance_scale=7.5, generator=_SCREAMING_SNAKE_CASE, output_type="np", )
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCamelCase__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCamelCase__ = init_image.resize((768, 512) )
UpperCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
UpperCamelCase__ = "BAAI/AltDiffusion"
UpperCamelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE, safety_checker=_SCREAMING_SNAKE_CASE, )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
UpperCamelCase__ = "A fantasy landscape, trending on artstation"
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = pipe(
prompt=_SCREAMING_SNAKE_CASE, image=_SCREAMING_SNAKE_CASE, strength=0.75, guidance_scale=7.5, generator=_SCREAMING_SNAKE_CASE, output_type="np", )
UpperCamelCase__ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
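# The branches above solve the mass-action law n * p = n_i**2 for whichever
# concentration was passed as zero; a standalone restatement with
# conventional names and illustrative numbers:
def _solve_mass_action(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    if electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    if hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)


assert _solve_mass_action(0, 4.0, 2.0) == ("electron_conc", 1.0)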
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase: List[str] = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: int = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Any = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: str = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
import math
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> int:
'''simple docstring'''
if (
not isinstance(__SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> List[Any]:
'''simple docstring'''
if (
not isinstance(__SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
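# The two functions above are the power triangle, P = S * pf and
# Q = S * sqrt(1 - pf**2), so P**2 + Q**2 == S**2; a quick numeric check
# with an illustrative 100 VA load at pf = 0.8:
_S, _pf = 100.0, 0.8
_P = _S * _pf                      # 80.0 W of real power
_Q = _S * math.sqrt(1 - _pf**2)    # 60.0 var of reactive power
assert math.isclose(_P**2 + _Q**2, _S**2)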
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
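# The tail array above is the classic O(n log n) LIS-length technique; an
# equivalent compact form using the standard library's bisect:
from bisect import bisect_left


def _lis_length(values: list) -> int:
    tails: list = []
    for x in values:
        pos = bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)
        else:
            tails[pos] = x
    return len(tails)


assert _lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6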
| 31
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : Optional[int], a_ : List[str], a_ : Any=7, a_ : Tuple=3, a_ : str=30, a_ : Optional[Any]=400, a_ : Optional[Any]=True, a_ : Optional[Any]=None, a_ : Optional[int]=True, a_ : int=[0.5, 0.5, 0.5], a_ : Tuple=[0.5, 0.5, 0.5], a_ : int=True, a_ : List[str]=1 / 255, a_ : List[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_pad
def lowercase_ ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Tuple, a_ : Optional[int], a_ : Union[str, Any]=False ):
"""simple docstring"""
if not batched:
UpperCamelCase__ = image_inputs[0]
if isinstance(lowercase_, Image.Image ):
UpperCamelCase__ = image.size
else:
UpperCamelCase__ = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ = int(self.size["shortest_edge"] * h / w )
UpperCamelCase__ = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase__ = self.size["""shortest_edge"""]
UpperCamelCase__ = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase__ = self.size["""shortest_edge"""]
UpperCamelCase__ = self.size["""shortest_edge"""]
else:
UpperCamelCase__ = []
for image in image_inputs:
UpperCamelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ = max(lowercase_, key=lambda a_ : item[0] )[0]
UpperCamelCase__ = max(lowercase_, key=lambda a_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase):
_lowerCamelCase : Optional[Any] = DetaImageProcessor if is_vision_available() else None
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = DetaImageProcessingTester(self )
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_, "image_mean" ) )
self.assertTrue(hasattr(lowercase_, "image_std" ) )
self.assertTrue(hasattr(lowercase_, "do_normalize" ) )
self.assertTrue(hasattr(lowercase_, "do_resize" ) )
self.assertTrue(hasattr(lowercase_, "do_rescale" ) )
self.assertTrue(hasattr(lowercase_, "do_pad" ) )
self.assertTrue(hasattr(lowercase_, "size" ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad, lowercase_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
pass
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_, Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
UpperCamelCase__ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ = self.image_processor_tester.get_expected_values(lowercase_, batched=lowercase_ )
UpperCamelCase__ = image_processing(lowercase_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_, numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_, np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
UpperCamelCase__ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ = image_processing(lowercase_, return_tensors="pt" ).pixel_values
UpperCamelCase__ = self.image_processor_tester.get_expected_values(lowercase_, batched=lowercase_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_, torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_, torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
UpperCamelCase__ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ = image_processing(lowercase_, return_tensors="pt" ).pixel_values
UpperCamelCase__ = self.image_processor_tester.get_expected_values(lowercase_, batched=lowercase_ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r" ) as f:
UpperCamelCase__ = json.loads(f.read() )
UpperCamelCase__ = {"""image_id""": 3_9769, """annotations""": target}
# encode them
UpperCamelCase__ = DetaImageProcessor()
UpperCamelCase__ = image_processing(images=lowercase_, annotations=lowercase_, return_tensors="pt" )
# verify pixel values
UpperCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape, lowercase_ )
UpperCamelCase__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], lowercase_, atol=1e-4 ) )
# verify area
UpperCamelCase__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], lowercase_ ) )
# verify boxes
UpperCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape, lowercase_ )
UpperCamelCase__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], lowercase_, atol=1e-3 ) )
# verify image_id
UpperCamelCase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], lowercase_ ) )
# verify is_crowd
UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], lowercase_ ) )
# verify class_labels
UpperCamelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], lowercase_ ) )
# verify orig_size
UpperCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], lowercase_ ) )
# verify size
UpperCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], lowercase_ ) )
@slow
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r" ) as f:
UpperCamelCase__ = json.loads(f.read() )
UpperCamelCase__ = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
UpperCamelCase__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCamelCase__ = DetaImageProcessor(format="coco_panoptic" )
UpperCamelCase__ = image_processing(images=lowercase_, annotations=lowercase_, masks_path=lowercase_, return_tensors="pt" )
# verify pixel values
UpperCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape, lowercase_ )
UpperCamelCase__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], lowercase_, atol=1e-4 ) )
# verify area
UpperCamelCase__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], lowercase_ ) )
# verify boxes
UpperCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape, lowercase_ )
UpperCamelCase__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], lowercase_, atol=1e-3 ) )
# verify image_id
UpperCamelCase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], lowercase_ ) )
# verify is_crowd
UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], lowercase_ ) )
# verify class_labels
UpperCamelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], lowercase_ ) )
# verify masks
UpperCamelCase__ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), lowercase_ )
# verify orig_size
UpperCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], lowercase_ ) )
# verify size
UpperCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], lowercase_ ) )
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( ProcessorMixin):
_lowerCamelCase : List[str] = ['image_processor', 'tokenizer']
_lowerCamelCase : str = 'BlipImageProcessor'
_lowerCamelCase : Any = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Optional[int], a_ : List[Any], a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = False
        super().__init__(a_, a_ )
UpperCamelCase__ = self.image_processor
def __call__( self : Dict, a_ : Tuple = None, a_ : Any = None, a_ : Dict = True, a_ : Optional[int] = False, a_ : Optional[int] = None, a_ : Tuple = None, a_ : Optional[int] = 0, a_ : List[str] = None, a_ : Optional[int] = None, a_ : List[str] = False, a_ : str = False, a_ : Dict = False, a_ : str = False, a_ : str = False, a_ : Tuple = True, a_ : Dict = None, **a_ : Optional[int], ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
UpperCamelCase__ = self.tokenizer
UpperCamelCase__ = self.tokenizer(
                text=a_, add_special_tokens=a_, padding=a_, truncation=a_, max_length=a_, stride=a_, pad_to_multiple_of=a_, return_attention_mask=a_, return_overflowing_tokens=a_, return_special_tokens_mask=a_, return_offsets_mapping=a_, return_token_type_ids=a_, return_length=a_, verbose=a_, return_tensors=a_, **a_, )
return text_encoding
# add pixel_values
        UpperCamelCase__ = self.image_processor(a_, return_tensors=a_ )
if text is not None:
UpperCamelCase__ = self.tokenizer(
                text=a_, add_special_tokens=a_, padding=a_, truncation=a_, max_length=a_, stride=a_, pad_to_multiple_of=a_, return_attention_mask=a_, return_overflowing_tokens=a_, return_special_tokens_mask=a_, return_offsets_mapping=a_, return_token_type_ids=a_, return_length=a_, verbose=a_, return_tensors=a_, **a_, )
else:
UpperCamelCase__ = None
if text_encoding is not None:
            encoding_image_processor.update(a_ )
return encoding_image_processor
def lowercase_ ( self : Dict, *a_ : Optional[Any], **a_ : Dict ):
"""simple docstring"""
        return self.tokenizer.batch_decode(*a_, **a_ )
def lowercase_ ( self : Any, *a_ : Optional[Any], **a_ : Dict ):
"""simple docstring"""
        return self.tokenizer.decode(*a_, **a_ )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer.model_input_names
UpperCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
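# Usage sketch for a combined processor like the one above; the checkpoint name is
# illustrative, and any BLIP-style checkpoint with a matching processor config would do:
# from transformers import BlipProcessor
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
# # -> BatchEncoding with "pixel_values", "input_ids" and "attention_mask"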
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( PretrainedConfig):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
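# Instantiation sketch; the public class name is an assumption based on the model_type
# above ("gpt_neox_japanese"):
# from transformers import GPTNeoXJapaneseConfig
# config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)
# assert config.use_cache is True  # default from the signature above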
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase: Union[str, Any] = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Union[str, Any] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowercase: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
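# The module above defers heavy imports (e.g. the torch models) until first attribute
# access. A minimal self-contained sketch of the same lazy-import idea, independent of
# transformers' _LazyModule:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value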
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
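# Worked example (fluid property values are approximate assumptions for water at ~20 °C):
# bulk_modulus ≈ 2.15e9 Pa, density ≈ 998 kg/m^3
# (2.15e9 / 998) ** 0.5 ≈ 1467.8 m/s, close to the accepted ~1480 m/s for water.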
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__( self : int, a_ : Dict, a_ : Union[str, Any]=12, a_ : Any=7, a_ : List[str]=True, a_ : str=True, a_ : Optional[Any]=True, a_ : Tuple=99, a_ : Optional[Any]=32, a_ : Union[str, Any]=32, a_ : Optional[int]=2, a_ : Dict=4, a_ : Union[str, Any]=37, a_ : Union[str, Any]=0.1, a_ : int=0.1, a_ : Union[str, Any]=512, a_ : str=0.02, a_ : List[Any]=0, a_ : Any=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = projection_dim
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = initializer_range
UpperCamelCase__ = scope
UpperCamelCase__ = bos_token_id
def lowercase_ ( self : Any ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            # pick a random split point per row: attend before it, mask after it
            start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,) )
            for batch_idx, start_index in enumerate(start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
    def lowercase_ ( self : Tuple, config : Any, input_ids : Optional[Any], input_mask : Optional[int] ):
        """simple docstring"""
        model = TFBlipTextModel(config=config )
        result = model(input_ids, attention_mask=input_mask, training=False )
        result = model(input_ids, training=False )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( TFModelTesterMixin , unittest.TestCase):
_lowerCamelCase : str = (TFBlipTextModel,) if is_tf_available() else ()
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Union[str, Any] = False
def lowercase_ ( self : Any ):
"""simple docstring"""
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37 )
def lowercase_ ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase_ ( self : Any ):
"""simple docstring"""
pass
def lowercase_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def lowercase_ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def lowercase_ ( self : Dict ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def lowercase_ ( self : Any, a_ : List[str]=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=a_ )
| 352
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'1 / {pow(temp + 1 , int(power ) )}' if series else "1" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
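# Example: p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'],
# i.e. the first five terms of the p-series with p = 2.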
| 31
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase ( PretrainedConfig):
_lowerCamelCase : List[Any] = 'mobilenet_v2'
def __init__( self : List[str], a_ : int=3, a_ : Optional[Any]=224, a_ : int=1.0, a_ : Union[str, Any]=8, a_ : Optional[Any]=8, a_ : Union[str, Any]=6, a_ : Dict=32, a_ : Optional[int]=True, a_ : Any=True, a_ : Dict="relu6", a_ : str=True, a_ : List[Any]=0.8, a_ : Optional[Any]=0.02, a_ : Optional[Any]=0.001, a_ : Optional[int]=255, **a_ : Dict, ):
"""simple docstring"""
        super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = depth_multiplier
UpperCamelCase__ = depth_divisible_by
UpperCamelCase__ = min_depth
UpperCamelCase__ = expand_ratio
UpperCamelCase__ = output_stride
UpperCamelCase__ = first_layer_is_expansion
UpperCamelCase__ = finegrained_output
UpperCamelCase__ = hidden_act
UpperCamelCase__ = tf_padding
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = semantic_loss_ignore_index
class UpperCAmelCase ( OnnxConfig):
_lowerCamelCase : Any = version.parse('1.11')
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
return 1e-4
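# The config above exposes `depth_divisible_by` and `min_depth`; MobileNet-style models
# round every channel count with a "make divisible" rule. A sketch of the commonly used
# helper (an assumption for illustration, not code from this file):
def make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # never round a channel count down by more than 10%
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value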
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase ( TaskTemplate):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
    def lowercase_ ( self : str, features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 0
|
'''simple docstring'''
import math
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float64 )  # double precision on purpose: pad() should downcast
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
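# The zero-mean / unit-variance checks above are utterance-level CMVN. A minimal NumPy
# sketch of the normalization (simplified: the real extractor also masks padded frames):
import numpy as np

def cmvn(features, eps=1e-5):
    """Normalize a (frames, mel_bins) array to zero mean / unit variance per bin."""
    return (features - features.mean(axis=0)) / np.sqrt(features.var(axis=0) + eps)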
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( rows : int , cols : int , mat : list[list[int]] ) -> int:
    '''simple docstring'''
    def update_area_of_max_square(row : int , col : int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def SCREAMING_SNAKE_CASE__( rows : int , cols : int , mat : list[list[int]] ) -> int:
    '''simple docstring'''
    def update_area_of_max_square_using_dp_array(
        row : int , col : int , dp_array : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows : int , cols : int , mat : list[list[int]] ) -> int:
    '''simple docstring'''
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , down )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def SCREAMING_SNAKE_CASE__( rows : int , cols : int , mat : list[list[int]] ) -> int:
    '''simple docstring'''
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , down )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        # copy (not alias) so the next pass still reads this row's finished values
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
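# Sanity check: [[1, 1], [1, 1]] contains a 2x2 all-ones square, so the call above
# prints 2. The bottom-up table uses O(rows * cols) time and space; the space-optimized
# variant keeps only two rows, i.e. O(cols) extra memory.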
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features : np.ndarray , dimensions : int ) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        eigenvalues , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), then keep only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int ) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        eigenvalues , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
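# Usage sketch (columns are samples, rows are features, matching the functions above):
# import numpy as np
# features = np.random.rand(3, 50)                        # 3 features, 50 samples
# reduced = principal_component_analysis(features, 2)     # -> shape (2, 50)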
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int] ) -> list[int]:
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
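# Example (assuming the reconstruction above):
# radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) -> [2, 24, 45, 66, 75, 90, 170, 802]
# Runtime is O(d * (n + RADIX)) for n non-negative integers with at most d digits;
# the sort mutates and returns the same list.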
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _lowerCamelCase : str = field(default='Image' , init=False , repr=False)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def list_image_compression_formats() -> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def encode_np_array(array : np.ndarray ) -> dict:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def SCREAMING_SNAKE_CASE__( objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
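# Usage sketch for the feature above with the `datasets` library (file path illustrative):
# from datasets import Dataset, Features, Image
# ds = Dataset.from_dict({"image": ["path/to/cat.png"]}, features=Features({"image": Image()}))
# ds[0]["image"]  # decoded lazily into a PIL.Image.Image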
| 31
| 0
|
'''simple docstring'''
import operator
def strand_sort(arr : list , reverse : bool = False , solution : list | None = None ) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
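# Strand sort repeatedly strips an increasing "strand" from the input and merges it into
# the solution; worst case (reverse-sorted input) costs O(n^2) comparisons, best case
# (already sorted) O(n). Note that it consumes `arr` in place via pop().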
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : Optional[Any], a_ : List[str], a_ : int=7, a_ : Any=3, a_ : Dict=18, a_ : str=30, a_ : List[str]=400, a_ : Any=True, a_ : Tuple=None, a_ : Tuple=True, a_ : int=[0.5, 0.5, 0.5], a_ : Dict=[0.5, 0.5, 0.5], ):
"""simple docstring"""
UpperCamelCase__ = size if size is not None else {"height": 18, "width": 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
def lowercase_ ( self : Dict ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = DPTImageProcessingTester(self )
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase, "image_mean" ) )
self.assertTrue(hasattr(__lowerCAmelCase, "image_std" ) )
self.assertTrue(hasattr(__lowerCAmelCase, "do_normalize" ) )
self.assertTrue(hasattr(__lowerCAmelCase, "do_resize" ) )
self.assertTrue(hasattr(__lowerCAmelCase, "size" ) )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"height": 18, "width": 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase, Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowerCAmelCase, numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase, np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowerCAmelCase, torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase, torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    UpperCamelCase__ = randint(0 , len(wav ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
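# Worked example (illustrative, not part of the original script): with the
# values used below, max_length=20.0 seconds at sample_rate=16000 gives
# sample_length = int(round(16000 * 20.0)) = 320000 samples, so a 25-second
# clip (400000 samples) is cut to a random contiguous 320000-sample window,
# while clips already shorter than that are returned unchanged:
#
#     wav = np.zeros(400_000, dtype=np.float32)  # fake 25 s clip at 16 kHz
#     cut = random_subsample(wav, max_length=20.0, sample_rate=16_000)
#     assert len(cut) == 320_000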
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        UpperCamelCase__ = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        UpperCamelCase__ = {model_input_name: inputs.get(model_input_name )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
        UpperCamelCase__ = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        UpperCamelCase__ = {model_input_name: inputs.get(model_input_name )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
    UpperCamelCase__ = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
        UpperCamelCase__ = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 31
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = SamImageProcessor()
UpperCamelCase__ = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple, **a_ : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def lowercase_ ( self : str ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
        UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
UpperCamelCase__ = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="np" )
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
@require_torch
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = [torch.ones((1, 3, 5, 5) )]
UpperCamelCase__ = [[1764, 2646]]
UpperCamelCase__ = [[683, 1024]]
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
UpperCamelCase__ = [np.ones((1, 3, 5, 5) )]
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = SamImageProcessor()
UpperCamelCase__ = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
        UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_image_processor(do_normalize=__lowerCAmelCase, padding_value=1.0 )
UpperCamelCase__ = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__lowerCAmelCase, padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __lowerCAmelCase )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="np" )
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
@require_tf
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = [tf.ones((1, 3, 5, 5) )]
UpperCamelCase__ = [[1764, 2646]]
UpperCamelCase__ = [[683, 1024]]
UpperCamelCase__ = processor.post_process_masks(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors="tf" )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, tf.convert_to_tensor(__lowerCAmelCase ), tf.convert_to_tensor(__lowerCAmelCase ), return_tensors="tf", )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
# should also work with np
UpperCamelCase__ = [np.ones((1, 3, 5, 5) )]
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors="tf" )
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646) )
UpperCamelCase__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, np.array(__lowerCAmelCase ), np.array(__lowerCAmelCase ), return_tensors="tf" )
@require_vision
@require_torchvision
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = SamImageProcessor()
UpperCamelCase__ = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname, **__lowerCAmelCase ).image_processor
def lowercase_ ( self : Any ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
        UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
        UpperCamelCase__ = np.random.randint(0, 2, size=(1, 3, 5, 5) ).astype(np.float32 )
UpperCamelCase__ = [tf.convert_to_tensor(__lowerCAmelCase )]
UpperCamelCase__ = [torch.tensor(__lowerCAmelCase )]
UpperCamelCase__ = [[1764, 2646]]
UpperCamelCase__ = [[683, 1024]]
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors="tf" )
UpperCamelCase__ = processor.post_process_masks(
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = SamProcessor(image_processor=__lowerCAmelCase )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="pt" )["pixel_values"].numpy()
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="pt" )["pixel_values"].numpy()
UpperCamelCase__ = image_processor(__lowerCAmelCase, return_tensors="tf" )["pixel_values"].numpy()
UpperCamelCase__ = processor(images=__lowerCAmelCase, return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase, __lowerCAmelCase ) )
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
    UpperCamelCase__ = len(lexicon )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
        if math.log2(index ).is_integer():
            UpperCamelCase__ = {}
            for curr_key in list(lexicon ):
                UpperCamelCase__ = lexicon.pop(curr_key )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
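# Illustrative trace of decompress_data above (assuming input produced by the
# matching LZW compressor): the lexicon starts as {"0": "0", "1": "1"}; each
# time the accumulated bits match a key, the mapped string is emitted, the
# matched key is rebound to value + "0", a fresh entry mapping to value + "1"
# is created, and once the entry count reaches a power of two every key is
# re-prefixed with "0", mirroring the growing code width used on compression.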
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
            UpperCamelCase__ = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
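# Note on remove_prefix above (added for clarity): it counts the run of "0"s
# before the first "1" (length `counter`), keeps data_bits[counter:], then
# slices the result again at counter + 1, so 2 * counter + 1 leading bits are
# consumed in total; this is the size prefix the matching compressor prepends.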
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
def __init__( self : List[Any], a_ : List[Any], a_ : Union[str, Any]=3, a_ : Tuple=32, a_ : Union[str, Any]=3, a_ : Union[str, Any]=10, a_ : Optional[int]=[10, 20, 30, 40], a_ : Dict=[1, 1, 2, 1], a_ : Dict=True, a_ : Union[str, Any]=True, a_ : Optional[Any]="relu", a_ : List[Any]=3, a_ : List[str]=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embeddings_size
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = depths
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_labels
UpperCamelCase__ = scope
UpperCamelCase__ = len(_snake_case )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def lowercase_ ( self : Dict, a_ : List[Any], a_ : Union[str, Any], a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = RegNetModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCamelCase__ = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase_ ( self : Union[str, Any], a_ : Union[str, Any], a_ : Dict, a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = RegNetForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCamelCase__ = model(_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
_lowerCamelCase : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
_lowerCamelCase : Optional[int] = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Any = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : List[Any] = False
_lowerCamelCase : int = False
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = RegNetModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=_snake_case, has_text_modality=_snake_case )
def lowercase_ ( self : Dict ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowercase_ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
pass
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_snake_case )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _snake_case )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(config=_snake_case )
for name, module in model.named_modules():
                if isinstance(_snake_case, (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
self.assertTrue(
torch.all(module.bias == 0 ), msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
def check_hidden_states_output(a_ : Any, a_ : Any, a_ : Union[str, Any] ):
UpperCamelCase__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(_snake_case, _snake_case ) )
UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(_snake_case ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ = layer_type
UpperCamelCase__ = True
check_hidden_states_output(_snake_case, _snake_case, _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(_snake_case, _snake_case, _snake_case )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = RegNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE__( ) -> str:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_snake_case )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_snake_case, return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**_snake_case )
# verify the logits
UpperCamelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, _snake_case )
UpperCamelCase__ = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _snake_case, atol=1e-4 ) )
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( DiffusionPipeline):
    def __init__( self : Any, a_ : VQModel, a_ : UNet2DModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
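# Minimal usage sketch (illustrative; assumes pretrained `vqvae`, `unet` and
# `scheduler` objects compatible with the constructor above, and that the
# keyword names mirror the positional parameters of `__call__`):
#
#     pipe = UpperCAmelCase(vqvae=vqvae, unet=unet, scheduler=scheduler)
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]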
| 31
| 0
|
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Any ) -> str:
'''simple docstring'''
    UpperCamelCase__ = torch.load(xlm_checkpoint_path , map_location="cpu" )
UpperCamelCase__ = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
UpperCamelCase__ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase__ = v
else:
UpperCamelCase__ = v
UpperCamelCase__ = chkpt["params"]
    UpperCamelCase__ = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase__ = chkpt["dico_word2id"]
UpperCamelCase__ = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCamelCase__ = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCamelCase__ = pytorch_dump_folder_path + "/" + CONFIG_NAME
UpperCamelCase__ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )
    print(F'Save vocab file to {pytorch_vocab_dump_path}' )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
__lowercase: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowercase: Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
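# Example invocation (script name and paths are placeholders):
#
#     python convert_xlm_checkpoint.py \
#         --xlm_checkpoint_path ./xlm_checkpoint.pth \
#         --pytorch_dump_folder_path ./converted-xlm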
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
    UpperCamelCase__ = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
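# For reference (illustrative, trimmed): the runners endpoint queried above
# returns JSON shaped like
#     {"total_count": 2, "runners": [{"name": "gpu-ci-1", "status": "offline", ...}]}
# which is why the code reads status["runners"] and filters on "name"/"status".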
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
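# Note (added for clarity): at import time only the `_import_structure` dict is
# materialised; `_LazyModule` resolves attributes such as `MvpModel` to their
# submodules on first access, so torch / tokenizers are imported only when a
# symbol that needs them is actually used:
#
#     from transformers.models.mvp import MvpTokenizer  # cheap, no torch import
#     from transformers.models.mvp import MvpModel      # triggers the torch path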
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.' )
        if i == 0:
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.' )
        if i == 0:
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
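# Minimal usage sketch (toy data, illustrative; the two helpers above are
# published by the datasets library as `interleave_datasets` and
# `concatenate_datasets`):
#
#     from datasets import Dataset, concatenate_datasets, interleave_datasets
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     interleave_datasets([d1, d2])["a"]   # [0, 10, 1, 11, 2, 12]
#     concatenate_datasets([d1, d2])["a"]  # [0, 1, 2, 10, 11, 12]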
| 31
| 0
|
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 8 ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_a ) for _ in range(_a ) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
i -= len(_a )
UpperCamelCase__ = i // 3
UpperCamelCase__ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCamelCase__ = (
chars_incl
+ random(_a , quotient + remainder )
+ random(_a , _a )
+ random(_a , _a )
)
UpperCamelCase__ = list(_a )
shuffle(_a )
return "".join(_a )
# random is a generalised function for letters, characters and numbers
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
return "".join(secrets.choice(_a ) for _ in range(_a ) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
pass # Put your code here...
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
pass # Put your code here...
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
pass # Put your code here...
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : int = 8 ) -> str:
'''simple docstring'''
if len(_a ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCamelCase__ = any(char in ascii_uppercase for char in password )
UpperCamelCase__ = any(char in ascii_lowercase for char in password )
UpperCamelCase__ = any(char in digits for char in password )
UpperCamelCase__ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
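# Example (illustrative): "Hwea7$2!" passes every check above, since it is 8
# characters long and contains an uppercase letter, lowercase letters, digits
# and punctuation, while "hunter22" fails the uppercase and punctuation checks.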
def SCREAMING_SNAKE_CASE__( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = int(input("Please indicate the max length of your password: " ).strip() )
UpperCamelCase__ = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_a ) )
print(
"Alternative Password generated:" , alternative_password_generator(_a , _a ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
    while len(files ) > 1:
        UpperCamelCase__ = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            UpperCamelCase__ = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
return optimal_merge_cost
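# Worked example (illustrative): for files = [2, 3, 4] the two cheapest files
# (2 and 3) merge first at cost 5, leaving [4, 5]; merging those costs 9, so
# the function above returns 5 + 9 = 14.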
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
    # get the generated string sequence
    UpperCamelCase__ = gray_code_sequence_string(bit_count )
    # convert the bit strings to integers
    for i in range(len(sequence ) ):
        UpperCamelCase__ = int(sequence[i] , 2 )
    return sequence
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
UpperCamelCase__ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
UpperCamelCase__ = gray_code_sequence_string(bit_count - 1 )
UpperCamelCase__ = []
    # prefix "0" onto the first half of the smaller sequence
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # prefix "1" onto the second half, walking it in reverse
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
return sequence
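# Example (illustrative): for bit_count = 2 the recursion builds on ["0", "1"];
# prefixing "0" over the first half gives ["00", "01"], and prefixing "1" over
# the same half in reverse order appends ["11", "10"], yielding
# ["00", "01", "11", "10"], where adjacent codes differ in exactly one bit.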
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence : list[int | str] ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence : list[int | str], current_sequence : list[int | str], index : int, index_used : list[int], ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used )
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
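# Expected output (sketch): the backtracking above prints all 4! = 24 orderings of
# [3, 1, 2, 4] first, followed by the 3! = 6 orderings of ["A", "B", "C"].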
| 31
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__lowercase: Tuple = logging.get_logger(__name__)
class UpperCAmelCase ( LayoutLMvaImageProcessor):
    def __init__( self, *args, **kwargs ):
        """simple docstring"""
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs )
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig ( PretrainedConfig):
    model_type = 'yolos'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig ( OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-4
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
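# Usage sketch (using the classes restored above): YolosConfig() reproduces the
# default hyper-parameters, e.g. YolosConfig().num_detection_tokens == 100 and
# YolosConfig().image_size == [512, 864].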
| 31
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase: Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase ( BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs )
    def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        """simple docstring"""
        size = get_size_dict(size )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs )
    def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs )
    def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors )
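# Usage sketch (hypothetical input; assumes PIL is available for resizing): a dummy
# HWC uint8 image comes back as a CHW array cropped to 224x224, e.g.
#   processor = UpperCAmelCase()
#   batch = processor.preprocess(np.zeros((300, 400, 3), dtype=np.uint8))
#   batch["pixel_values"][0].shape  # -> (3, 224, 224)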
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False ):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f'Loading PyTorch weights from {pt_path}' )
        pt_state_dict = torch.load(pt_path, map_location="cpu" )
        logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key: Tuple[str], pt_tensor: np.ndarray, random_flax_state_dict: Dict[str, jnp.ndarray], model_prefix: str, ) -> (Tuple[str], np.ndarray):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict(key: Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
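# Example (sketch): for a PyTorch linear weight keyed ("dense", "weight") with a 2-D
# tensor, assuming the Flax model stores the weight under ("dense", "kernel"), the
# function hits the linear-layer branch and returns ("dense", "kernel") with the
# tensor transposed, since Flax keeps kernels as (in_features, out_features).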
def convert_pytorch_state_dict_to_flax( pt_state_dict, flax_model ):
    '''simple docstring'''
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("." ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames, flax_model ):
    '''simple docstring'''
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("." ) )
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model( model, flax_checkpoint_path ):
    '''simple docstring'''
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path, "rb" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(model, flax_state_dict )
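# Usage sketch (hypothetical path and model class):
#   pt_model = BertModel(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
# copies the Flax checkpoint weights into the PyTorch module in place.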
def load_flax_weights_in_pytorch_model( pt_model, flax_state ):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params, flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split("." )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor, np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference." )
    else:
        logger.warning(
            f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 31
| 0
|
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowercase: Optional[int] = "sshleifer/bart-tiny-random"
__lowercase: Dict = "patrickvonplaten/t5-tiny-random"
@require_torch
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return AutoConfig.from_pretrained(_lowerCamelCase )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
self.assertEqual(student.config.num_hidden_layers, 1 )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=_lowerCamelCase )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=_lowerCamelCase )
self.assertEqual(student.config.encoder_layers, 1 )
self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
self.assertEqual(student.config.encoder_layers, 1 )
self.assertEqual(student.config.decoder_layers, 1 )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ):
create_student_by_copying_alternating_layers(_lowerCamelCase, tempfile.mkdtemp(), e=_lowerCamelCase, d=_lowerCamelCase )
| 367
|
'''simple docstring'''
def validate_initial_digits( credit_card_number : str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation( credit_card_number : str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len, -1, -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1, -1, -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
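# Worked example (sketch): for "4111111111111111", doubling every second digit from
# the right gives 8 + 2*7 = 22, the remaining eight 1s add 8, and 30 % 10 == 0,
# so the number passes the Luhn check.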
def validate_credit_card_number( credit_card_number : str ) -> bool:
    '''simple docstring'''
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(f'{error_message} it fails the Luhn check.' )
        return False
    print(f'{credit_card_number} is a valid credit card number.' )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data( subreddit : str , limit : int = 1 , age : str = "new" , wanted_data : list | None = None ) -> dict:
    '''simple docstring'''
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f'Invalid search term: {invalid_search_terms}'
        raise ValueError(msg )
    response = requests.get(
        f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
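# Worked example (sketch): with electron_conc=25 and hole_conc=100 (intrinsic_conc=0),
# the function returns ("intrinsic_conc", 50.0), since (25 * 100) ** 0.5 == 50.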
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|