code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
# Universal gas constant R (J · mol⁻¹ · K⁻¹).
UNIVERSAL_GAS_CONSTANT = 8.314462


def SCREAMING_SNAKE_CASE__(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas from the ideal-gas law PV = nRT.

    Args:
        moles: amount of substance n (mol).
        kelvin: absolute temperature T (K).
        volume: volume V (m³).

    Returns:
        Pressure P = n·R·T / V (Pa).

    Raises:
        ValueError: if any argument is negative.
    """
    # The original signature repeated one parameter name three times (a
    # SyntaxError) while the body read moles/kelvin/volume, and the module
    # constant had lost its name; restoring the real names fixes both.
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def SCREAMING_SNAKE_CASE__(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas from the ideal-gas law PV = nRT.

    Args:
        moles: amount of substance n (mol).
        kelvin: absolute temperature T (K).
        pressure: pressure P (Pa).

    Returns:
        Volume V = n·R·T / P (m³).

    Raises:
        ValueError: if any argument is negative.
    """
    # Original had three identical parameter names (SyntaxError); the body
    # already read moles/kelvin/pressure, so the real names are restored.
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    # Run the module's doctests when the file is executed as a script.
    from doctest import testmod

    testmod()
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__(file_path: str) -> str:
    """Read a file as raw bytes and return its contents as a bit string.

    Every byte is rendered as eight '0'/'1' characters, MSB first.

    Args:
        file_path: path of the file to read.

    Returns:
        The concatenated 8-bit binary representation of every byte.

    Exits the process with an error message if the file cannot be opened.
    """
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        # join() builds the result in one pass instead of the original
        # quadratic string-concatenation loop (whose assignment targets
        # had also been lost).
        return "".join(f"{byte:08b}" for byte in data)
    except OSError:
        print("File not accessible")
        sys.exit()
def SCREAMING_SNAKE_CASE__(data_bits: str) -> str:
    """Decompress an LZW bit stream back into a bit string.

    The lexicon starts as {"0": "0", "1": "1"} and grows one entry per
    emitted code; whenever the next index reaches a power of two, every
    existing key gains a leading '0' so key widths match the growing
    code length.

    Args:
        data_bits: the compressed stream as a string of '0'/'1' characters.

    Returns:
        The decompressed bit string.
    """
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            # Keep accumulating bits until the buffer matches a known code.
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        # math.log2 replaces the nonexistent math.loga of the original:
        # when index is a power of two the code width grows by one bit,
        # so every existing key is re-prefixed with '0'.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def SCREAMING_SNAKE_CASE__(file_path: str, to_write: str) -> None:
    """Pack the bit string *to_write* into bytes and write them to *file_path*.

    The stream is split into 8-bit groups. When the data ends exactly on a
    byte boundary a whole "10000000" marker byte is appended (and then the
    final, marker-only byte is NOT written, so the payload round-trips);
    otherwise the last partial group is padded with a '1' and trailing '0's.

    Exits the process with an error message if the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                # NOTE(review): this branch pads the last partial byte, which
                # [:-1] below then drops — it only fires for inputs that are
                # not byte-aligned, which decompressed data never is.
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                # The original passed the wrong (parameter) name to int().
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def SCREAMING_SNAKE_CASE__(data_bits: str) -> str:
    """Strip the unary length prefix from a compressed bit stream.

    Counts the leading '0's up to the first '1', then drops that many bits
    and one more past the marker (the original's two successive slices are
    preserved exactly).

    Args:
        data_bits: bit string beginning with the '0…01' prefix.

    Returns:
        The bit string with the prefix removed.
    """
    # The original had lost the assignment targets for both `counter`
    # and `data_bits`; they are restored here.
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
    """Driver: read a compressed file as bits, strip the length prefix,
    LZW-decompress the stream and write the resulting bytes out.

    NOTE(review): the duplicated parameter names and the helper calls below
    do not resolve as written — the defining names were mangled during
    extraction (the helpers are defined as ``SCREAMING_SNAKE_CASE__`` above).
    Restore the real names before running.
    """
    UpperCamelCase__ = read_file_binary(_UpperCamelCase )
    UpperCamelCase__ = remove_prefix(_UpperCamelCase )
    UpperCamelCase__ = decompress_data(_UpperCamelCase )
    write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: <input_path> <output_path>.
    # NOTE(review): `compress` is not defined under that name in this dump —
    # the driver above was mangled to SCREAMING_SNAKE_CASE__.
    compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
# Demo data for the Banker's-algorithm class below.
# NOTE(review): all three module constants share the mangled name
# ``__lowercase``, so only the last assignment survives — the real names
# (claim vector, allocation table, maximum-claim table) were lost.
# Total units of each of the 4 resource types in the system.
__lowercase: list[int] = [8, 5, 9, 7]
# Resources currently allocated to each of the 5 processes.
__lowercase: list[list[int]] = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
# Maximum claim (peak demand) table for the same 5 processes.
__lowercase: list[list[int]] = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class UpperCAmelCase :
    """Banker's algorithm: simulates granting resources to processes while
    keeping the system in a safe state.

    NOTE(review): identifiers were mangled during extraction — every method
    is named ``lowercase_`` (later defs shadow earlier ones), assignment
    targets collapsed to ``UpperCamelCase__``, and the private ``self.__*``
    attributes the methods read are never actually set. Annotated as-is.
    """

    def __init__( self : List[Any], a_ : list[int], a_ : list[list[int]], a_ : list[list[int]], ):
        """Store the claim vector, allocation table and maximum-claim table.

        NOTE(review): the three parameters share the mangled name ``a_`` and
        the attribute targets were lost.
        """
        UpperCamelCase__ = claim_vector
        UpperCamelCase__ = allocated_resources_table
        UpperCamelCase__ = maximum_claim_table

    def lowercase_ ( self : Any ):
        """Column-wise totals of the allocation table: resources in use."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def lowercase_ ( self : Tuple ):
        """Available resources = claim vector − resources currently in use."""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def lowercase_ ( self : Dict ):
        """Per-process remaining need = maximum claim − current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(a_ ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def lowercase_ ( self : Optional[Any] ):
        """Map each need vector back to its original process index."""
        return {self.__need().index(a_ ): i for i in self.__need()}

    def lowercase_ ( self : List[str], **a_ : Optional[int] ):
        """Run the safety simulation: repeatedly execute any process whose
        remaining need fits in the available resources, freeing its
        allocation, until the need list empties (safe) or no process can
        run (unsafe)."""
        UpperCamelCase__ = self.__need()
        UpperCamelCase__ = self.__allocated_resources_table
        UpperCamelCase__ = self.__available_resources()
        UpperCamelCase__ = self.__need_index_manager()
        # Optional pretty-print of the tables before the simulation.
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n" )
        while need_list:
            UpperCamelCase__ = False
            for each_need in need_list:
                UpperCamelCase__ = True
                for index, need in enumerate(a_ ):
                    if need > available_resources[index]:
                        # This process cannot run with what is available.
                        UpperCamelCase__ = False
                        break
                if execution:
                    UpperCamelCase__ = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            UpperCamelCase__ = original_need_index
                    print(f'Process {process_number + 1} is executing.' )
                    # remove the process run from stack
                    need_list.remove(a_ )
                    # update available/freed resources stack
                    UpperCamelCase__ = np.array(a_ ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(a_ ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break

    def lowercase_ ( self : List[Any] ):
        """Print the allocation table, the maximum-claim table and the
        derived usage/availability vectors."""
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(a_ ) + 1}'
                + " ".join(f'{it:>8}' for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(a_ ) + 1}'
                + " ".join(f'{it:>8}' for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(a_ ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(a_ ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Unconditional latent-diffusion image pipeline built from a VQ-VAE
    decoder, a UNet noise predictor and a DDIM scheduler.

    NOTE(review): the base-class name (presumably DiffusionPipeline) and the
    left-hand sides of most assignments were mangled during extraction, so
    the block does not run as written. Annotated as-is.
    """

    def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
        """Register the three sub-models with the pipeline machinery."""
        super().__init__()
        self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )

    @torch.no_grad()
    def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
        """Sample images: start from Gaussian latents, run DDIM denoising
        steps, then decode the final latents with the VQ-VAE."""
        # Draw the initial latents in the UNet's expected sample shape.
        UpperCamelCase__ = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
        UpperCamelCase__ = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(a_ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase__ = {}
        if accepts_eta:
            UpperCamelCase__ = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
            # predict the noise residual
            UpperCamelCase__ = self.unet(a_, a_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
        # decode the image latents with the VAE
        UpperCamelCase__ = self.vqvae.decode(a_ ).sample
        # Map from [-1, 1] to [0, 1] and put channels last for numpy/PIL.
        UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
        UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase__ = self.numpy_to_pil(a_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Fall back to None when sentencepiece is unavailable so the slow-tokenizer
# reference used below stays importable.
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    __lowercase: Dict = None

__lowercase: Dict = logging.get_logger(__name__)

# SentencePiece word-boundary marker used by the Pegasus vocabulary.
__lowercase: Optional[Any] = "▁"

# NOTE(review): these module constants all share the mangled name
# ``__lowercase``; the tokenizer class below references their real names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...), which were lost
# during extraction.
__lowercase: List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowercase: Optional[Any] = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
__lowercase: Any = {
    "google/pegasus-xsum": 512,
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Fast (Rust-backed) Pegasus tokenizer.

    NOTE(review): the base-class name (presumably PreTrainedTokenizerFast),
    the class-attribute right-hand sides and most assignment targets were
    mangled during extraction, so the block does not run as written.
    Annotated as-is.
    """

    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase = PegasusTokenizer
    _lowerCamelCase = ['input_ids', 'attention_mask']

    def __init__( self : Union[str, Any], a_ : List[Any]=None, a_ : Any=None, a_ : Dict="<pad>", a_ : Union[str, Any]="</s>", a_ : List[Any]="<unk>", a_ : Optional[Any]="<mask_2>", a_ : int="<mask_1>", a_ : str=None, a_ : Optional[int]=103, **a_ : List[str], ):
        """Build the fast tokenizer, expanding ``<unk_i>`` placeholder tokens
        so the special-token id layout matches the slow Pegasus tokenizer."""
        UpperCamelCase__ = offset
        if additional_special_tokens is not None:
            if not isinstance(a_, a_ ):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(a_ )}, but is'
                    f' {type(a_ )}' )
            # Prepend the sentence-mask token unless the caller included it.
            UpperCamelCase__ = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(a_ ), self.offset - 1 )
            ]
            if len(set(a_ ) ) != len(a_ ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            UpperCamelCase__ = additional_special_tokens_extended
        else:
            UpperCamelCase__ = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset )]
        super().__init__(
            a_, tokenizer_file=a_, pad_token=a_, eos_token=a_, unk_token=a_, mask_token=a_, mask_token_sent=a_, offset=a_, additional_special_tokens=a_, **a_, )
        UpperCamelCase__ = vocab_file
        # Saving a slow vocabulary requires the sentencepiece model file.
        UpperCamelCase__ = False if not self.vocab_file else True

    def lowercase_ ( self : List[Any], a_ : str ):
        """Return a 0/1 mask over *seq* flagging special tokens (1 = special)."""
        UpperCamelCase__ = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
        return [1 if x in all_special_ids else 0 for x in seq]

    def lowercase_ ( self : Optional[int], a_ : List, a_ : Optional[List] = None, a_ : bool = False ):
        """Special-tokens mask for one or two sequences; the appended eos is
        the trailing [1]."""
        if already_has_special_tokens:
            return self._special_token_mask(a_ )
        elif token_ids_a is None:
            return self._special_token_mask(a_ ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def lowercase_ ( self : Any, a_ : Dict, a_ : int=None ):
        """Build model inputs by appending the eos token id."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self : Optional[Any], a_ : str, a_ : Optional[str] = None ):
        """Copy the sentencepiece vocab file into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(a_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCamelCase__ = os.path.join(
            a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
            copyfile(self.vocab_file, a_ )
        return (out_vocab_file,)
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__(target_runners: list, token: str) -> None:
    """Query the GitHub Actions API and report offline self-hosted runners.

    Writes the offline runners (as JSON) to ``offline_runners.txt`` so the
    result can be reported on Slack.

    Args:
        target_runners: names of the runners to check.
        token: GitHub token with ``actions:read`` permission.

    Raises:
        ValueError: if any of the target runners is offline.
    """
    # The original signature repeated one parameter name (a SyntaxError)
    # and every local assignment target had been lost; names restored.
    offline_runners = []
    # NOTE(review): shell=True with an interpolated token is shell-injection
    # prone; the token comes from a trusted CI secret here, but a list-based
    # subprocess.run without a shell would be safer.
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    stdout_text = output.stdout.decode("utf-8")
    status = json.loads(stdout_text)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":

    def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
        """Parse a comma-separated CLI value into a list of strings.

        NOTE(review): the body reads ``values`` but the parameter name was
        mangled to ``_UpperCamelCase`` during extraction; likewise the
        parser/args assignments below target ``__lowercase`` while the code
        reads ``parser``/``args``, and ``list_str``/``get_runner_status``
        are this file's mangled definitions.
        """
        return values.split("," )

    __lowercase: str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    __lowercase: str = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__(data: bytes) -> str:
    """Encode *data* as an uppercase Base16 (hex) string (RFC 4648 §8).

    Args:
        data: the raw bytes to encode.

    Returns:
        Two uppercase hex digits per input byte.
    """
    # The original applied hex() to the whole bytes object (via the mangled
    # parameter name) instead of to each byte of the iteration.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def SCREAMING_SNAKE_CASE__(data: str) -> bytes:
    """Decode an uppercase Base16 (hex) string back into bytes.

    Args:
        data: the Base16 text to decode.

    Returns:
        The decoded bytes.

    Raises:
        ValueError: if the length is odd or any character is outside 0-9A-F.
    """
    # Restored the parameter name `data` the body already referenced.
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """Interleave several map-style or iterable datasets into one.

    Validates that every element is a Dataset or IterableDataset (not a
    dict of splits), that all elements are of the same kind, and that the
    stopping strategy is recognized, then dispatches to the matching
    ``_interleave_*`` implementation.

    NOTE(review): every parameter was mangled to ``_UpperCamelCase`` and the
    tuple-assignment target for (dataset_type, other_type) was lost, so the
    block does not run as written. Annotated as-is.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
            if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
        if i == 0:
            # Remember the kind of the first element; all others must match.
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    # Dispatch on the dataset kind determined above.
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
    """Concatenate several map-style or iterable datasets along *axis*.

    Mirrors the interleave wrapper above: validates element kinds and
    homogeneity, then dispatches to the matching ``_concatenate_*``
    implementation.

    NOTE(review): every parameter was mangled to ``_UpperCamelCase`` and the
    tuple-assignment target was lost. Also note the error messages below
    still say "interleave" — they were copy-pasted from the interleave
    wrapper and should say "concatenate".
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
            if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
        if i == 0:
            # Remember the kind of the first element; all others must match.
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    # Dispatch on the dataset kind determined above.
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def SCREAMING_SNAKE_CASE__(number: int) -> bool:
    """Return True if *number* is a perfect square.

    Uses the float square root, which is exact for the small values this
    script produces; the original had lost the assignment target `sq`.
    """
    sq = int(number**0.5)
    return number == sq * sq
def SCREAMING_SNAKE_CASE__(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple:
    """Return x + y + z as a reduced fraction (numerator, denominator).

    The three addends are given as numerator/denominator pairs; the result
    is brought over the common denominator and divided by its gcd. The
    original had lost the assignment targets `top`, `bottom` and `hcf`.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 35 ) -> int:
    """Project-Euler-style search: for every pair of reduced fractions x, y
    with denominators up to *order*, combine them under four rules
    (n = 1, 2, -1, -2) into z, keep the reduced z with
    0 < z_num < z_den <= order, and collect the distinct reduced sums
    x + y + z. Returns numerator + denominator of the total of those sums.

    NOTE(review): assignment targets and the helper names (``add_three``,
    ``is_sq``, parameter ``order``) were mangled during extraction, and the
    ``= 42`` lines are placeholder residue of type annotations. Annotated
    as-is.
    """
    UpperCamelCase__ = set()
    UpperCamelCase__ = 42
    UpperCamelCase__ = Fraction(0 )
    UpperCamelCase__ = 42
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1: z = x + y over the common denominator, reduced.
                    UpperCamelCase__ = x_num * y_den + x_den * y_num
                    UpperCamelCase__ = x_den * y_den
                    UpperCamelCase__ = gcd(_UpperCamelCase , _UpperCamelCase )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        UpperCamelCase__ = add_three(
                            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                        unique_s.add(_UpperCamelCase )
                    # n=2: only valid when both parts are perfect squares.
                    UpperCamelCase__ = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    UpperCamelCase__ = x_den * x_den * y_den * y_den
                    if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
                        UpperCamelCase__ = int(sqrt(_UpperCamelCase ) )
                        UpperCamelCase__ = int(sqrt(_UpperCamelCase ) )
                        UpperCamelCase__ = gcd(_UpperCamelCase , _UpperCamelCase )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            UpperCamelCase__ = add_three(
                                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                            unique_s.add(_UpperCamelCase )
                    # n=-1: harmonic combination.
                    UpperCamelCase__ = x_num * y_num
                    UpperCamelCase__ = x_den * y_num + x_num * y_den
                    UpperCamelCase__ = gcd(_UpperCamelCase , _UpperCamelCase )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        UpperCamelCase__ = add_three(
                            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                        unique_s.add(_UpperCamelCase )
                    # n=-2 (the original comment said n=2): squares again.
                    UpperCamelCase__ = x_num * x_num * y_num * y_num
                    UpperCamelCase__ = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
                        UpperCamelCase__ = int(sqrt(_UpperCamelCase ) )
                        UpperCamelCase__ = int(sqrt(_UpperCamelCase ) )
                        UpperCamelCase__ = gcd(_UpperCamelCase , _UpperCamelCase )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            UpperCamelCase__ = add_three(
                                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                            unique_s.add(_UpperCamelCase )
    # Sum all distinct reduced fractions and report numerator + denominator.
    for num, den in unique_s:
        total += Fraction(_UpperCamelCase , _UpperCamelCase )
    return total.denominator + total.numerator
if __name__ == "__main__":
    # Print the answer for the default search order (35).
    # NOTE(review): `solution` was defined above under the mangled name
    # SCREAMING_SNAKE_CASE__, so this call does not resolve as written.
    print(F"""{solution() = }""")
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__(files: list) -> float:
    """Return the minimum total cost of pairwise-merging all *files*.

    Greedy optimal merge pattern: repeatedly merge the two smallest files;
    each merge costs the sum of the two sizes. The input list is consumed
    (mutated) in the process, matching the original behavior.

    Args:
        files: file sizes to merge (mutated in place).

    Returns:
        The accumulated merge cost (0 for zero or one file).
    """
    # The original had lost every assignment target (optimal_merge_cost,
    # temp, min_index); they are restored here.
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 31
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowercase: Optional[Any] = logging.get_logger(__name__)

# Canonical config.json locations for the released DeBERTa-v2 checkpoints.
# NOTE(review): both module constants share the mangled name ``__lowercase``,
# so the logger assignment above is immediately shadowed — the real names
# were lost during extraction.
__lowercase: Dict = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Configuration class for DeBERTa-v2 models (model_type 'deberta-v2').

    NOTE(review): the base class (presumably PretrainedConfig) and the
    parameter names the body reads (hidden_size, vocab_size, ...) were
    mangled during extraction — every parameter is ``a_`` and every
    attribute assignment target collapsed to ``UpperCamelCase__`` — so the
    block does not run as written. Annotated as-is.
    """

    # Model identifier used by the auto-config machinery.
    _lowerCamelCase : List[str] = 'deberta-v2'

    def __init__( self : int, a_ : Optional[Any]=12_8100, a_ : Dict=1536, a_ : List[str]=24, a_ : Optional[Any]=24, a_ : Dict=6144, a_ : Dict="gelu", a_ : Dict=0.1, a_ : Dict=0.1, a_ : List[str]=512, a_ : int=0, a_ : Optional[int]=0.02, a_ : Any=1e-7, a_ : List[str]=False, a_ : str=-1, a_ : Union[str, Any]=0, a_ : Tuple=True, a_ : Any=None, a_ : int=0, a_ : Union[str, Any]="gelu", **a_ : Tuple, ):
        """Store the transformer hyper-parameters on the config object."""
        super().__init__(**a_ )
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = type_vocab_size
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = relative_attention
        UpperCamelCase__ = max_relative_positions
        UpperCamelCase__ = pad_token_id
        UpperCamelCase__ = position_biased_input
        # Backwards compatibility
        if type(a_ ) == str:
            # A "|"-separated string of position-attention types is split
            # into the list form used internally.
            UpperCamelCase__ = [x.strip() for x in pos_att_type.lower().split("|" )]
        UpperCamelCase__ = pos_att_type
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = kwargs.get("pooler_hidden_size", a_ )
        UpperCamelCase__ = pooler_dropout
        UpperCamelCase__ = pooler_hidden_act
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """ONNX-export configuration for DeBERTa-v2.

    NOTE(review): the base class (presumably OnnxConfig) and several
    referenced names were mangled during extraction. Annotated as-is.
    """

    @property
    def lowercase_ ( self : List[str] ):
        """Dynamic-axis input spec; includes token_type_ids only when the
        model actually uses token types."""
        if self.task == "multiple-choice":
            UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            UpperCamelCase__ = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )

    @property
    def lowercase_ ( self : Dict ):
        """Minimum ONNX opset required for export."""
        return 12

    def lowercase_ ( self : Optional[Any], a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], a_ : int = -1, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional["TensorType"] = None, a_ : int = 3, a_ : int = 40, a_ : int = 40, a_ : "PreTrainedTokenizerBase" = None, ):
        """Build dummy inputs, dropping token_type_ids when the model does
        not use token types."""
        UpperCamelCase__ = super().generate_dummy_inputs(preprocessor=a_, framework=a_ )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
    """Print every permutation of the given sequence via backtracking.

    NOTE(review): ``create_state_space_tree`` is this file's recursive
    helper, but its ``def`` name was mangled during extraction, so the call
    does not resolve as written.
    """
    create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__(sequence: list, current_sequence: list, index: int, index_used: list) -> None:
    """Backtracking worker that prints every permutation of *sequence*.

    Args:
        sequence: the items to permute.
        current_sequence: the partial permutation built so far (mutated and
            fully restored in place).
        index: how many positions of the permutation are already fixed.
        index_used: per-position flags marking items already placed.
    """
    # A full-length partial permutation is complete: emit it.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            # The original had lost both index_used[i] assignment targets,
            # and its recursive call referenced the mangled-away name; the
            # recursion now calls this function itself.
            index_used[i] = True
            SCREAMING_SNAKE_CASE__(sequence, current_sequence, index + 1, index_used)
            # Undo the choice so the next branch starts from a clean state.
            current_sequence.pop()
            index_used[i] = False
# Demo: print all permutations of a numeric and then a string sequence.
# NOTE(review): the variable names were mangled to ``__lowercase`` while the
# calls read ``sequence``/``sequence_a``, and ``generate_all_permutations``
# is defined above under a mangled name — none of this resolves as written.
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase: int = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = ['input_features', 'is_longer']
def __init__( self : Optional[int], a_ : Optional[int]=64, a_ : List[str]=4_8000, a_ : List[str]=480, a_ : int=10, a_ : Tuple=1024, a_ : Tuple=0.0, a_ : List[Any]=False, a_ : float = 0, a_ : float = 1_4000, a_ : int = None, a_ : str = "fusion", a_ : str = "repeatpad", **a_ : int, ):
"""simple docstring"""
super().__init__(
feature_size=a_, sampling_rate=a_, padding_value=a_, return_attention_mask=a_, **a_, )
UpperCamelCase__ = top_db
UpperCamelCase__ = truncation
UpperCamelCase__ = padding
UpperCamelCase__ = fft_window_size
UpperCamelCase__ = (fft_window_size >> 1) + 1
UpperCamelCase__ = hop_length
UpperCamelCase__ = max_length_s
UpperCamelCase__ = max_length_s * sampling_rate
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = frequency_min
UpperCamelCase__ = frequency_max
UpperCamelCase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=a_, min_frequency=a_, max_frequency=a_, sampling_rate=a_, norm=a_, mel_scale="htk", )
UpperCamelCase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=a_, min_frequency=a_, max_frequency=a_, sampling_rate=a_, norm="slaney", mel_scale="slaney", )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase_ ( self : List[Any], a_ : np.array, a_ : Optional[np.array] = None ):
"""simple docstring"""
UpperCamelCase__ = spectrogram(
a_, window_function(self.fft_window_size, "hann" ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=a_, log_mel="dB", )
return log_mel_spectrogram.T
def lowercase_ ( self : Any, a_ : Dict, a_ : List[str], a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase__ = [0]
# randomly choose index for each part
UpperCamelCase__ = np.random.choice(ranges[0] )
UpperCamelCase__ = np.random.choice(ranges[1] )
UpperCamelCase__ = np.random.choice(ranges[2] )
UpperCamelCase__ = mel[idx_front : idx_front + chunk_frames, :]
UpperCamelCase__ = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCamelCase__ = mel[idx_back : idx_back + chunk_frames, :]
UpperCamelCase__ = torch.tensor(mel[None, None, :] )
UpperCamelCase__ = torch.nn.functional.interpolate(
a_, size=[chunk_frames, 64], mode="bilinear", align_corners=a_ )
UpperCamelCase__ = mel_shrink[0][0].numpy()
UpperCamelCase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase_ ( self : List[str], a_ : np.array, a_ : List[Any], a_ : int, a_ : List[str] ):
    """Truncate or pad one waveform's mel features to ``max_length`` samples.

    Returns ``(input_mel, longer)`` where ``longer`` flags whether the audio
    exceeded ``max_length``. "rand_trunc" random-crops the waveform;
    "fusion" builds a 4-channel stacked mel; "repeat"/"repeatpad" tile short
    audio before zero-padding.

    NOTE(review): all four parameters share the name ``a_`` (duplicate
    argument — a syntax error) and assignments bind ``UpperCamelCase__``
    while later lines read ``waveform``/``truncation``/``mel``/``input_mel``
    etc.; identifiers appear machine-mangled.
    """
    if waveform.shape[0] > max_length:
        if truncation == "rand_trunc":
            UpperCamelCase__ = True
            # random crop to max_length (for compatibility) -> this should be handled by self.pad
            UpperCamelCase__ = len(a_ ) - max_length
            UpperCamelCase__ = np.random.randint(0, overflow + 1 )
            UpperCamelCase__ = waveform[idx : idx + max_length]
            UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters_slaney )[None, :]
        elif truncation == "fusion":
            UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters )
            UpperCamelCase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
            UpperCamelCase__ = mel.shape[0]
            if chunk_frames == total_frames:
                # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                # In this case, we just use the whole audio.
                UpperCamelCase__ = np.stack([mel, mel, mel, mel], axis=0 )
                UpperCamelCase__ = False
            else:
                UpperCamelCase__ = self._random_mel_fusion(a_, a_, a_ )
                UpperCamelCase__ = True
        else:
            raise NotImplementedError(f'data_truncating {truncation} not implemented' )
    else:
        UpperCamelCase__ = False
        # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
        if waveform.shape[0] < max_length:
            if padding == "repeat":
                UpperCamelCase__ = int(max_length / len(a_ ) )
                UpperCamelCase__ = np.stack(np.tile(a_, n_repeat + 1 ) )[:max_length]
            if padding == "repeatpad":
                UpperCamelCase__ = int(max_length / len(a_ ) )
                UpperCamelCase__ = np.stack(np.tile(a_, a_ ) )
            # Zero-pad up to ``max_length``.
            UpperCamelCase__ = np.pad(a_, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0 )
        if truncation == "fusion":
            UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters )
            UpperCamelCase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
        else:
            UpperCamelCase__ = self._np_extract_fbank_features(a_, self.mel_filters_slaney )[None, :]
    return input_mel, longer
def __call__( self : Tuple, a_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], a_ : str = None, a_ : Optional[str] = None, a_ : Optional[int] = None, a_ : Optional[int] = None, a_ : Optional[Union[str, TensorType]] = None, **a_ : Tuple, ):
    """Featurize raw audio into a ``BatchFeature`` with "input_features"
    (mel spectrograms) and "is_longer" flags.

    NOTE(review): all parameters share the name ``a_`` (duplicate argument —
    a syntax error) and every assignment binds ``UpperCamelCase__`` while
    later lines read ``truncation``/``raw_speech``/``is_batched`` etc.;
    identifiers appear machine-mangled.
    """
    UpperCamelCase__ = truncation if truncation is not None else self.truncation
    UpperCamelCase__ = padding if padding else self.padding
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
    else:
        logger.warning(
            "It is strongly recommended to pass the `sampling_rate` argument to this function. "
            "Failing to do so can result in silent errors that might be hard to debug." )
    # A 2-D numpy array is treated as a batch; >2-D means multi-channel (rejected).
    UpperCamelCase__ = isinstance(a_, np.ndarray ) and len(raw_speech.shape ) > 1
    if is_batched_numpy and len(raw_speech.shape ) > 2:
        raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
    UpperCamelCase__ = is_batched_numpy or (
        isinstance(a_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
    )
    if is_batched:
        UpperCamelCase__ = [np.asarray(a_, dtype=np.floataa ) for speech in raw_speech]
    elif not is_batched and not isinstance(a_, np.ndarray ):
        UpperCamelCase__ = np.asarray(a_, dtype=np.floataa )
    elif isinstance(a_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
        UpperCamelCase__ = raw_speech.astype(np.floataa )
    # always return batch
    if not is_batched:
        UpperCamelCase__ = [np.asarray(a_ )]
    # convert to mel spectrogram, truncate and pad if needed.
    UpperCamelCase__ = [
        self._get_input_mel(a_, max_length if max_length else self.nb_max_samples, a_, a_ )
        for waveform in raw_speech
    ]
    UpperCamelCase__ = []
    UpperCamelCase__ = []
    for mel, longer in padded_inputs:
        input_mel.append(a_ )
        is_longer.append(a_ )
    if truncation == "fusion" and sum(a_ ) == 0:
        # if no audio is longer than 10s, then randomly select one audio to be longer
        UpperCamelCase__ = np.random.randint(0, len(a_ ) )
        UpperCamelCase__ = True
    if isinstance(input_mel[0], a_ ):
        UpperCamelCase__ = [np.asarray(a_, dtype=np.floataa ) for feature in input_mel]
    # is_longer is a list of bool
    UpperCamelCase__ = [[longer] for longer in is_longer]
    UpperCamelCase__ = {"input_features": input_mel, "is_longer": is_longer}
    UpperCamelCase__ = BatchFeature(a_ )
    if return_tensors is not None:
        UpperCamelCase__ = input_features.convert_to_tensors(a_ )
    return input_features
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """YOLOS model configuration (ViT-style backbone plus DETR-style
    detection head with a Hungarian matcher).

    NOTE(review): the base-class name ``SCREAMING_SNAKE_CASE__`` is not
    defined in this file (presumably ``PretrainedConfig``); every ``__init__``
    parameter is the mangled name ``a_`` (duplicate argument — a syntax
    error) and the assignments bind a local ``UpperCamelCase__`` instead of
    attributes on ``self``. Identifiers appear machine-mangled.
    """
    # model_type identifier used by the configuration registry.
    _lowerCamelCase : List[str] = 'yolos'
    def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
        """Store transformer, patch-embedding, detection-head and loss
        hyper-parameters; extra kwargs are forwarded to the base config."""
        super().__init__(**a_ )
        # Transformer encoder hyper-parameters.
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        # Patch-embedding / detection-token settings.
        UpperCamelCase__ = image_size
        UpperCamelCase__ = patch_size
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = qkv_bias
        UpperCamelCase__ = num_detection_tokens
        UpperCamelCase__ = use_mid_position_embeddings
        UpperCamelCase__ = auxiliary_loss
        # Hungarian matcher
        UpperCamelCase__ = class_cost
        UpperCamelCase__ = bbox_cost
        UpperCamelCase__ = giou_cost
        # Loss coefficients
        UpperCamelCase__ = bbox_loss_coefficient
        UpperCamelCase__ = giou_loss_coefficient
        UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """ONNX export settings: minimum torch version, input axis spec, and
    validation tolerance.

    All three properties deliberately share the name ``lowercase_`` exactly
    as in the original, so only the last definition (returning ``12``)
    survives on the class — the shadowing is preserved.
    """

    # Minimum torch version supported for the export.
    _lowerCamelCase : Union[str, Any] = version.parse('1.11')

    @property
    def lowercase_ ( self : str ):
        """Dynamic-axis mapping for the single ``pixel_values`` input."""
        dynamic_axes = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict({"pixel_values": dynamic_axes})

    @property
    def lowercase_ ( self : Tuple ):
        """Absolute tolerance used when validating exported outputs."""
        return 1e-4

    @property
    def lowercase_ ( self : Optional[int] ):
        """Default ONNX opset version."""
        return 12
| 31
| 0
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowercase: List[str] = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tests for a small (32x32, 3-channel) attention UNet2DModel.

    NOTE(review): the two mixin base names ``SCREAMING_SNAKE_CASE__`` are
    undefined in this file, and the property bodies bind ``UpperCamelCase__``
    while the returns read ``noise``/``time_step`` (and ``.to(a_ )`` uses an
    undefined ``a_``) — identifiers appear machine-mangled.
    """
    _lowerCamelCase : str = UNetaDModel
    _lowerCamelCase : List[str] = 'sample'
    @property
    def lowercase_ ( self : Optional[int] ):
        """Random (batch, channels, H, W) sample plus a timestep tensor."""
        UpperCamelCase__ = 4
        UpperCamelCase__ = 3
        UpperCamelCase__ = (32, 32)
        UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
        UpperCamelCase__ = torch.tensor([10] ).to(a_ )
        return {"sample": noise, "timestep": time_step}
    @property
    def lowercase_ ( self : int ):
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)
    @property
    def lowercase_ ( self : Union[str, Any] ):
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)
    def lowercase_ ( self : Any ):
        """Model init kwargs and matching dummy inputs for the common tests."""
        UpperCamelCase__ = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        UpperCamelCase__ = self.dummy_input
        return init_dict, inputs_dict
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tests for a 4-channel LDM-style UNet2DModel, including pretrained
    loading, accelerate low-memory loading, and an output-value check.

    NOTE(review): base mixins named ``SCREAMING_SNAKE_CASE__`` are undefined
    in this file, and method bodies bind ``UpperCamelCase__`` while reading
    names such as ``model``/``noise``/``output`` — identifiers appear
    machine-mangled.
    """
    _lowerCamelCase : Optional[int] = UNetaDModel
    _lowerCamelCase : List[Any] = 'sample'
    @property
    def lowercase_ ( self : str ):
        """Random (batch, channels, H, W) sample plus a timestep tensor."""
        UpperCamelCase__ = 4
        UpperCamelCase__ = 4
        UpperCamelCase__ = (32, 32)
        UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
        UpperCamelCase__ = torch.tensor([10] ).to(a_ )
        return {"sample": noise, "timestep": time_step}
    @property
    def lowercase_ ( self : Union[str, Any] ):
        """Expected input shape (C, H, W)."""
        return (4, 32, 32)
    @property
    def lowercase_ ( self : int ):
        """Expected output shape (C, H, W)."""
        return (4, 32, 32)
    def lowercase_ ( self : List[Any] ):
        """Model init kwargs and matching dummy inputs for the common tests."""
        UpperCamelCase__ = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        UpperCamelCase__ = self.dummy_input
        return init_dict, inputs_dict
    def lowercase_ ( self : Union[str, Any] ):
        """Pretrained checkpoint loads with no missing keys and runs."""
        UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=a_ )
        self.assertIsNotNone(a_ )
        self.assertEqual(len(loading_info["missing_keys"] ), 0 )
        model.to(a_ )
        UpperCamelCase__ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU" )
    def lowercase_ ( self : str ):
        """Same forward pass, but on GPU."""
        UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=a_ )
        model.to(a_ )
        UpperCamelCase__ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU" )
    def lowercase_ ( self : Union[str, Any] ):
        """Accelerate (low_cpu_mem_usage) loading matches normal loading."""
        UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=a_ )
        model_accelerate.to(a_ )
        model_accelerate.eval()
        UpperCamelCase__ = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0 ), )
        UpperCamelCase__ = noise.to(a_ )
        UpperCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(a_ )
        UpperCamelCase__ = model_accelerate(a_, a_ )["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=a_, low_cpu_mem_usage=a_ )
        model_normal_load.to(a_ )
        model_normal_load.eval()
        UpperCamelCase__ = model_normal_load(a_, a_ )["sample"]
        assert torch_all_close(a_, a_, rtol=1e-3 )
    def lowercase_ ( self : Any ):
        """Seeded forward pass reproduces a known output slice."""
        UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
        model.eval()
        model.to(a_ )
        UpperCamelCase__ = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), )
        UpperCamelCase__ = noise.to(a_ )
        UpperCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(a_ )
        with torch.no_grad():
            UpperCamelCase__ = model(a_, a_ ).sample
        UpperCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        UpperCamelCase__ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
        # fmt: on
        self.assertTrue(torch_all_close(a_, a_, rtol=1e-3 ) )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tests for an NCSN++ (score-based, Fourier time embedding) UNet2DModel,
    including slow 256x256 pretrained runs and output-value checks.

    NOTE(review): base mixins named ``SCREAMING_SNAKE_CASE__`` are undefined
    in this file, and method bodies bind ``UpperCamelCase__`` while reading
    names such as ``model``/``noise``/``output`` — identifiers appear
    machine-mangled.
    """
    _lowerCamelCase : List[Any] = UNetaDModel
    _lowerCamelCase : List[str] = 'sample'
    @property
    def lowercase_ ( self : Dict, a_ : Dict=(32, 32) ):
        """Random sample of the given spatial size plus int timesteps."""
        UpperCamelCase__ = 4
        UpperCamelCase__ = 3
        UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
        UpperCamelCase__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa, device=a_ )
        return {"sample": noise, "timestep": time_step}
    @property
    def lowercase_ ( self : str ):
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)
    @property
    def lowercase_ ( self : int ):
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)
    def lowercase_ ( self : Tuple ):
        """NCSN++-style init kwargs and matching dummy inputs."""
        UpperCamelCase__ = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0 ),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        UpperCamelCase__ = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def lowercase_ ( self : List[Any] ):
        """Pretrained 256x256 checkpoint loads cleanly and runs."""
        UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=a_ )
        self.assertIsNotNone(a_ )
        self.assertEqual(len(loading_info["missing_keys"] ), 0 )
        model.to(a_ )
        UpperCamelCase__ = self.dummy_input
        UpperCamelCase__ = floats_tensor((4, 3) + (256, 256) ).to(a_ )
        UpperCamelCase__ = noise
        UpperCamelCase__ = model(**a_ )
        assert image is not None, "Make sure output is not None"
    @slow
    def lowercase_ ( self : List[Any] ):
        """All-ones 256x256 input reproduces a known output slice."""
        UpperCamelCase__ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
        model.to(a_ )
        UpperCamelCase__ = 4
        UpperCamelCase__ = 3
        UpperCamelCase__ = (256, 256)
        UpperCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(a_ )
        UpperCamelCase__ = torch.tensor(batch_size * [1e-4] ).to(a_ )
        with torch.no_grad():
            UpperCamelCase__ = model(a_, a_ ).sample
        UpperCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCamelCase__ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7_608] )
        # fmt: on
        self.assertTrue(torch_all_close(a_, a_, rtol=1e-2 ) )
    def lowercase_ ( self : int ):
        """All-ones 32x32 input on the dummy checkpoint reproduces a known slice."""
        UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
        model.to(a_ )
        UpperCamelCase__ = 4
        UpperCamelCase__ = 3
        UpperCamelCase__ = (32, 32)
        UpperCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(a_ )
        UpperCamelCase__ = torch.tensor(batch_size * [1e-4] ).to(a_ )
        with torch.no_grad():
            UpperCamelCase__ = model(a_, a_ ).sample
        UpperCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCamelCase__ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
        # fmt: on
        self.assertTrue(torch_all_close(a_, a_, rtol=1e-2 ) )
    def lowercase_ ( self : Optional[Any] ):
        """Intentionally empty placeholder overriding a common-suite test."""
        pass
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict.

    NOTE(review): all parameters share the name ``_UpperCamelCase``
    (duplicate argument — a syntax error) and assignments bind
    ``UpperCamelCase__`` while later lines read ``pt_path``/``pt_state_dict``
    and call converter names not defined in this file — identifiers appear
    machine-mangled.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        # Load on CPU so conversion does not require a GPU.
        UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
    return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
    """Rename one PyTorch weight key to Flax conventions and reshape if needed.

    Handles layer norm scale, batch-norm statistics, embeddings, conv kernels
    (transposed to Flax layout), linear kernels (transposed), legacy
    gamma/beta names, and the new ``weight_norm`` parametrization keys.

    NOTE(review): all parameters share the name ``_UpperCamelCase``
    (duplicate argument — a syntax error) and assignments bind
    ``UpperCamelCase__`` while the code reads ``renamed_pt_tuple_key``/
    ``pt_tensor`` — identifiers appear machine-mangled.
    """
    def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
        # True if the key, with or without the base-model prefix, is present.
        return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        # 4-D conv weight is transposed to Flax's kernel layout.
        UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        UpperCamelCase__ = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    UpperCamelCase__ = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        UpperCamelCase__ = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        UpperCamelCase__ = pt_tuple_key[-2] + "_v"
    if name is not None:
        UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    # No rule matched: key and tensor pass through unchanged.
    return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
    """Convert a flat PyTorch state dict into a nested Flax parameter dict.

    NOTE(review): both parameters share the name ``_UpperCamelCase``
    (duplicate argument — a syntax error) and assignments bind
    ``UpperCamelCase__`` while later lines read ``pt_state_dict``/
    ``flax_model``/``flax_key`` etc. — identifiers appear machine-mangled.
    """
    # Convert torch tensors to numpy for the Flax side.
    UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
    UpperCamelCase__ = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        UpperCamelCase__ = flax_model.params["params"]
    else:
        UpperCamelCase__ = flax_model.params
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(_UpperCamelCase )
    UpperCamelCase__ = {}
    # Detect head-model -> base-model and base-model -> head-model loading.
    UpperCamelCase__ = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    UpperCamelCase__ = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCamelCase__ = tuple(pt_key.split("." ) )
        # remove base model prefix if necessary
        UpperCamelCase__ = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = pt_tuple_key[1:]
        # Correctly rename weight parameters
        UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        # add model prefix if necessary
        UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
                continue
            # also add unexpected weight so that warning is thrown
            UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
        else:
            # also add unexpected weight so that warning is thrown
            UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
    return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
    """Convert sharded PyTorch checkpoint files into a nested Flax param dict.

    Same renaming logic as the unsharded converter, applied per shard file.

    NOTE(review): both parameters share the name ``_UpperCamelCase``
    (duplicate argument — a syntax error) and assignments bind
    ``UpperCamelCase__`` while later lines read ``shard_filenames``/
    ``pt_state_dict`` etc. — identifiers appear machine-mangled.
    """
    import torch
    # Load the index
    UpperCamelCase__ = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        UpperCamelCase__ = torch.load(_UpperCamelCase )
        UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
        UpperCamelCase__ = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            UpperCamelCase__ = flax_model.params["params"]
            UpperCamelCase__ = flatten_dict(_UpperCamelCase )
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
        else:
            UpperCamelCase__ = flax_model.params
            UpperCamelCase__ = flatten_dict(_UpperCamelCase )
        # Detect head-model -> base-model and base-model -> head-model loading.
        UpperCamelCase__ = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        UpperCamelCase__ = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            UpperCamelCase__ = tuple(pt_key.split("." ) )
            # remove base model prefix if necessary
            UpperCamelCase__ = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                UpperCamelCase__ = pt_tuple_key[1:]
            # Correctly rename weight parameters
            UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            # add model prefix if necessary
            UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                UpperCamelCase__ = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                    continue
                if "var" in flax_key[-1]:
                    UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
                    continue
                # also add unexpected weight so that warning is thrown
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
            else:
                # also add unexpected weight so that warning is thrown
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
    return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
    """Deserialize a msgpack Flax checkpoint and load it into a PyTorch model.

    NOTE(review): both parameters share the name ``_UpperCamelCase``
    (duplicate argument — a syntax error) and assignments bind
    ``UpperCamelCase__`` while later lines read ``flax_checkpoint_path``/
    ``model`` — identifiers appear machine-mangled.
    """
    UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
    logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
    # import correct flax class
    UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
    # load flax weight dict
    with open(_UpperCamelCase , "rb" ) as state_f:
        try:
            UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
    """Load a Flax parameter tree into a PyTorch model, renaming keys and
    reporting missing/unexpected weights.

    NOTE(review): both parameters share the name ``_UpperCamelCase``
    (duplicate argument — a syntax error); assignments bind
    ``UpperCamelCase__`` while later lines read ``flax_state``/
    ``pt_model_dict`` etc., and the two lambdas below reference undefined
    ``x``/``params`` — identifiers appear machine-mangled.
    """
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
    if any(_UpperCamelCase ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    UpperCamelCase__ = pt_model.state_dict()
    # Detect head-model <-> base-model loading directions.
    UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    UpperCamelCase__ = []
    UpperCamelCase__ = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # conv layer
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # linear layer
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            UpperCamelCase__ = ".".join(_UpperCamelCase )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCamelCase__ = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCamelCase__ = key.split("." )
            UpperCamelCase__ = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCamelCase__ = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCamelCase__ = key_components[-2] + "_v"
            if name is not None:
                UpperCamelCase__ = key_components[:-3] + [name]
                UpperCamelCase__ = ".".join(_UpperCamelCase )
                UpperCamelCase__ = key
        if flax_key in special_pt_names:
            UpperCamelCase__ = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
                UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
                # remove from missing keys
                missing_keys.remove(_UpperCamelCase )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(_UpperCamelCase )
    pt_model.load_state_dict(_UpperCamelCase )
    # re-transform missing_keys to list
    UpperCamelCase__ = list(_UpperCamelCase )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference." )
    else:
        logger.warning(
            F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            "If your task is similar to the task the model of the checkpoint was trained on, "
            F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 31
| 0
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> None:
    """Estimate pi with a Monte Carlo dart-throwing simulation.

    Draws ``_UpperCamelCase`` uniform random points inside the square
    [-1, 1] x [-1, 1]; the fraction that lands within the unit circle
    approximates pi / 4.  Prints the estimate and its absolute error.

    Fixes: the nested helper had duplicate parameter names (a syntax error)
    and the body read undefined locals (``distance_from_centre``,
    ``proportion``, ``pi_estimate``).

    :param _UpperCamelCase: number of random samples to draw (must be > 0).
    """

    def is_in_circle(x: float, y: float) -> bool:
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return sqrt((x**2) + (y**2)) <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(_UpperCamelCase)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def SCREAMING_SNAKE_CASE__(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte-Carlo estimate of ``∫ function_to_integrate`` over
    [min_value, max_value]: mean of the function at uniform sample points,
    scaled by the interval length.

    NOTE(review): the original declared all four parameters with the same
    name (a SyntaxError); names restored from the body's references.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def SCREAMING_SNAKE_CASE__(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Sanity-check the Monte-Carlo integrator on y = x, whose exact area on
    [min_value, max_value] is (max² - min²) / 2.

    NOTE(review): the original had duplicate parameter names (SyntaxError),
    a nested helper returning an undefined ``x``, and passed the iteration
    count for every argument of the estimator call; all restored.
    ``area_under_curve_estimator`` is the estimator defined above, which the
    code mangling left under a different name in this file.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def SCREAMING_SNAKE_CASE__(iterations: int) -> None:
    """Estimate pi by integrating sqrt(4 - x²) over [0, 2] — a quarter of a
    radius-2 circle, whose exact area is pi.

    NOTE(review): the original nested helper returned an undefined ``x`` and
    the estimator was called with the iteration count passed twice instead
    of (iterations, function, 0.0, 2.0); restored.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__(credit_card_number: str) -> bool:
    """Return True if the number starts with one of the supported issuer
    prefixes ("34", "35", "37", "4", "5", "6").

    NOTE(review): the original parameter was renamed to ``_UpperCamelCase``
    while the body read ``credit_card_number`` — NameError; restored.
    """
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def SCREAMING_SNAKE_CASE__(credit_card_number: str) -> bool:
    """Return True if ``credit_card_number`` passes the Luhn checksum.

    NOTE(review): in the original every local was bound to one mangled name
    and the first loop iterated ``range`` over the string parameter itself
    (TypeError); reconstructed from the standard Luhn algorithm.  The
    pointless rebuild of the number string on each iteration was dropped —
    the second loop only reads positions the first loop never touches.
    """
    cc_number = credit_card_number
    total = 0
    # Double the value of every second digit, starting from the
    # second-to-last one.
    for i in range(len(cc_number) - 2, -1, -2):
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two digit number, i.e. greater than 9
        # (e.g., 6 x 2 = 12), add the digits of the product
        # (12: 1 + 2 = 3); for values 10..18 that equals digit % 10 + 1.
        if digit > 9:
            digit %= 10
            digit += 1
        total += digit
    # Sum up the remaining (undoubled) digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def SCREAMING_SNAKE_CASE__(credit_card_number: str) -> bool:
    """Validate a credit card number string, printing the reason on failure.

    Checks, in order: all digits, length 13-16, issuer prefix, Luhn checksum.

    NOTE(review): the helper names ``validate_initial_digits`` and
    ``luhn_validation`` were destroyed elsewhere in this file by the code
    mangling, so the calls below are unresolved as written — same as in the
    original block.
    """
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
    # Run the embedded doctests, then demo the validator.
    import doctest

    doctest.testmod()
    # NOTE(review): ``validate_credit_card_number`` is not defined under that
    # name in this file (the validator above was renamed by the code
    # mangling), so these demo calls raise NameError as written.
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the mangled source bound every constant below to the same
# name (``__lowercase``); restored the identifiers the tokenizer class in
# this file actually references (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, logger).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}
@lru_cache()
def SCREAMING_SNAKE_CASE__() -> dict:
    """Return the GPT-2/BART byte -> unicode-character map used by byte-level
    BPE: printable latin bytes map to themselves, every other byte value is
    shifted into code points >= 256 so no byte maps to whitespace/control
    characters.

    NOTE(review): the original body read undefined names (``cs``, ``n``,
    ``_UpperCamelCase``) and annotated the return as the unimported
    ``Dict``; restored.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # Remaining bytes get fresh, non-colliding code points.
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def SCREAMING_SNAKE_CASE__(word) -> set:
    """Return the set of adjacent symbol pairs in ``word`` (a sequence of
    symbols, e.g. a tuple of strings), as used by BPE merging.

    NOTE(review): the original bound its locals to one mangled name and read
    ``pairs``/``prev_char``/``word`` before definition; restored.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Byte-level BPE tokenizer (BART-family, GPT-2 style).

    NOTE(review): this block was damaged by automated renaming — every
    method parameter became ``a_`` (duplicate parameter names are a
    SyntaxError) and every local binding target became ``UpperCamelCase__``,
    so later reads of the original names fail.  Code is kept byte-for-byte;
    only comments/docstrings were added.
    """

    # NOTE(review): four distinct class attributes (presumably
    # vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes,
    # model_input_names — confirm against upstream) all share one name here,
    # so each assignment shadows the previous one.
    _lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
    _lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase : List[str] = ['input_ids', 'attention_mask']

    def __init__( self : Optional[int], a_ : int, a_ : List[str], a_ : Optional[int]="replace", a_ : int="<s>", a_ : List[Any]="</s>", a_ : List[Any]="</s>", a_ : Tuple="<s>", a_ : Tuple="<unk>", a_ : Any="<pad>", a_ : Optional[int]="<mask>", a_ : Any=False, **a_ : Optional[Any], ):
        """Load the encoder (vocab.json) and BPE merge ranks (merges.txt) and
        wrap the special tokens as AddedToken instances."""
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else bos_token
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else eos_token
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else sep_token
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else cls_token
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else unk_token
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token
        super().__init__(
            errors=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, cls_token=a_, pad_token=a_, mask_token=a_, add_prefix_space=a_, **a_, )
        with open(a_, encoding="utf-8" ) as vocab_handle:
            UpperCamelCase__ = json.load(a_ )
        # Reverse map: token id -> token string.
        UpperCamelCase__ = {v: k for k, v in self.encoder.items()}
        UpperCamelCase__ = errors  # how to handle errors in decoding
        UpperCamelCase__ = bytes_to_unicode()
        UpperCamelCase__ = {v: k for k, v in self.byte_encoder.items()}
        with open(a_, encoding="utf-8" ) as merges_handle:
            # First line is the "#version" header; last split entry is empty.
            UpperCamelCase__ = merges_handle.read().split("\n" )[1:-1]
        UpperCamelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
        UpperCamelCase__ = {}
        UpperCamelCase__ = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCamelCase__ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    def lowercase_ ( self : int ):
        """Vocabulary size (number of entries in the encoder)."""
        return len(self.encoder )

    def lowercase_ ( self : List[str] ):
        """Return the full vocab (base encoder plus added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder )

    def lowercase_ ( self : List[Any], a_ : int ):
        """Apply BPE merges to a single (byte-encoded) token, with caching."""
        if token in self.cache:
            return self.cache[token]
        UpperCamelCase__ = tuple(a_ )
        UpperCamelCase__ = get_pairs(a_ )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (earliest-learned) merge available.
            UpperCamelCase__ = min(a_, key=lambda a_ : self.bpe_ranks.get(a_, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCamelCase__ , UpperCamelCase__ = bigram
            UpperCamelCase__ = []
            UpperCamelCase__ = 0
            while i < len(a_ ):
                try:
                    UpperCamelCase__ = word.index(a_, a_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCamelCase__ = j
                if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCamelCase__ = tuple(a_ )
            UpperCamelCase__ = new_word
            if len(a_ ) == 1:
                break
            else:
                UpperCamelCase__ = get_pairs(a_ )
        UpperCamelCase__ = " ".join(a_ )
        UpperCamelCase__ = word
        return word

    def lowercase_ ( self : Any, a_ : List[str] ):
        """Tokenize text: regex-split, byte-encode each piece, then BPE."""
        UpperCamelCase__ = []
        for token in re.findall(self.pat, a_ ):
            UpperCamelCase__ = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_ ).split(" " ) )
        return bpe_tokens

    def lowercase_ ( self : Any, a_ : Optional[int] ):
        """Token string -> id (falls back to the unk token's id)."""
        return self.encoder.get(a_, self.encoder.get(self.unk_token ) )

    def lowercase_ ( self : Optional[int], a_ : str ):
        """Token id -> token string."""
        return self.decoder.get(a_ )

    def lowercase_ ( self : List[str], a_ : List[str] ):
        """Join tokens and undo the byte-to-unicode encoding back to text."""
        UpperCamelCase__ = "".join(a_ )
        UpperCamelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text

    def lowercase_ ( self : Tuple, a_ : str, a_ : Optional[str] = None ):
        """Write vocab.json and merges.txt into a directory."""
        if not os.path.isdir(a_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCamelCase__ = os.path.join(
            a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase__ = os.path.join(
            a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(a_, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=a_, ensure_ascii=a_ ) + "\n" )
        UpperCamelCase__ = 0
        with open(a_, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # NOTE(review): the lambda's parameter was renamed to ``a_`` but
            # its body still reads ``kv`` — NameError at runtime; should be
            # ``key=lambda kv: kv[1]`` (sort merges by rank).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda a_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    UpperCamelCase__ = token_index
                writer.write(" ".join(a_ ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def lowercase_ ( self : Optional[Any], a_ : List[int], a_ : Optional[List[int]] = None ):
        """Add special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        UpperCamelCase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowercase_ ( self : str, a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ):
        """Mask marking special-token (1) vs sequence-token (0) positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
        if token_ids_a is None:
            return [1] + ([0] * len(a_ )) + [1]
        return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]

    def lowercase_ ( self : Tuple, a_ : List[int], a_ : Optional[List[int]] = None ):
        """Token-type ids — all zeros for BART, even for sequence pairs."""
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowercase_ ( self : Tuple, a_ : Dict, a_ : str=False, **a_ : Optional[Any] ):
        """Optionally prepend a space so the first word BPE-merges like a
        mid-sentence word."""
        UpperCamelCase__ = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(a_ ) > 0 and not text[0].isspace()):
            UpperCamelCase__ = " " + text
        return (text, kwargs)
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# NOTE(review): the mangled source bound all seven CI-hub constants to one
# name (``__lowercase``) while later fixtures read CI_HUB_USER,
# CI_HUB_USER_TOKEN, etc., and the URL lines themselves read
# CI_HUB_ENDPOINT; names restored accordingly.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): the original line ended in "(unknown)" — a metadata
# placeholder leaked into the source; restored to the "{path}" template slot.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{path}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> List[Any]:
    """Point huggingface_hub's file-download URL template at the CI hub.

    NOTE(review): ``monkeypatch`` is read below but is not a parameter of
    this fixture — NameError as written; presumably the original took
    pytest's ``monkeypatch`` fixture.  Confirm against upstream.
    """
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", _UpperCamelCase )


@pytest.fixture
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Optional[int]:
    """Point datasets' endpoint/URL config at the CI hub.

    NOTE(review): same undefined ``monkeypatch`` problem as above.
    """
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", _UpperCamelCase )
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", _UpperCamelCase )


@pytest.fixture
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Any:
    """Redirect HfFolder's token path for tests.

    NOTE(review): ``monkeypatch`` undefined here too.
    """
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", _UpperCamelCase )


@pytest.fixture
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int, _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
    """Save a hub token for the duration of a test, then delete it.

    NOTE(review): both parameters share one name — a SyntaxError as written.
    """
    HfFolder.save_token(_UpperCamelCase )
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session" )
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
    """Session-wide HfApi client against the CI hub.

    NOTE(review): ``_UpperCamelCase`` is not defined in this scope —
    presumably the CI endpoint constant; confirm against upstream.
    """
    return HfApi(endpoint=_UpperCamelCase )


@pytest.fixture(scope="session" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : HfApi ) -> str:
    """Install the CI user token for the whole session, restoring any
    previously saved token afterwards."""
    UpperCamelCase__ = HfFolder.get_token()
    HfFolder.save_token(_UpperCamelCase )
    yield CI_HUB_USER_TOKEN
    # NOTE(review): ``previous_token`` is read here but the assignment above
    # was renamed to ``UpperCamelCase__`` — NameError as written.
    if previous_token is not None:
        HfFolder.save_token(_UpperCamelCase )


@pytest.fixture
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> List[Any]:
    """Factory returning a callable that deletes a dataset repo.

    NOTE(review): ``hf_api`` is not in scope inside the nested function —
    presumably it was this fixture's parameter originally.
    """
    def _cleanup_repo(_UpperCamelCase : str ):
        hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset" )
    return _cleanup_repo


@pytest.fixture
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
    """Context-manager factory that yields a repo id and cleans the repo up
    afterwards.

    NOTE(review): ``repo_id`` and ``cleanup_repo`` are unresolved — their
    original bindings were destroyed by the renaming.
    """
    @contextmanager
    def _temporary_repo(_UpperCamelCase : List[Any] ):
        try:
            yield repo_id
        finally:
            cleanup_repo(_UpperCamelCase )
    return _temporary_repo
@pytest.fixture(scope="session" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : HfApi, _UpperCamelCase : List[str], _UpperCamelCase : Union[str, Any] ) -> int:
    """Create a session-scoped private dataset repo holding a text file and
    delete it at teardown (best-effort).

    NOTE(review): the three parameters share one name — a SyntaxError — so
    the api/token/file arguments below are unresolvable as written; the
    locals ``repo_name``/``repo_id`` were likewise renamed away.
    """
    UpperCamelCase__ = F'repo_txt_data-{int(time.time() * 10e3 )}'
    UpperCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset", private=_UpperCamelCase )
    hf_api.upload_file(
        token=_UpperCamelCase, path_or_fileobj=str(_UpperCamelCase ), path_in_repo="data/text_data.txt", repo_id=_UpperCamelCase, repo_type="dataset", )
    yield repo_id
    try:
        hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any], _UpperCamelCase : Optional[Any], _UpperCamelCase : Any ) -> Tuple:
    """Per-test accessor for the session-scoped text-data repo.

    NOTE(review): duplicate parameter names (SyntaxError) and the returned
    fixture name is not defined under that identifier in this file.
    """
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : HfApi, _UpperCamelCase : int, _UpperCamelCase : List[str] ) -> Tuple:
    """Create a session-scoped private dataset repo holding a zipped text
    archive; same parameter-name damage as the fixture above."""
    UpperCamelCase__ = F'repo_zipped_txt_data-{int(time.time() * 10e3 )}'
    UpperCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset", private=_UpperCamelCase )
    hf_api.upload_file(
        token=_UpperCamelCase, path_or_fileobj=str(_UpperCamelCase ), path_in_repo="data.zip", repo_id=_UpperCamelCase, repo_type="dataset", )
    yield repo_id
    try:
        hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any], _UpperCamelCase : List[Any], _UpperCamelCase : List[str] ) -> Optional[int]:
    """Per-test accessor for the zipped-text repo (same damage as above)."""
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : HfApi, _UpperCamelCase : Optional[int], _UpperCamelCase : Tuple ) -> Optional[int]:
    """Create a session-scoped private dataset repo holding a zipped image
    archive; same parameter-name damage as the fixtures above."""
    UpperCamelCase__ = F'repo_zipped_img_data-{int(time.time() * 10e3 )}'
    UpperCamelCase__ = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset", private=_UpperCamelCase )
    hf_api.upload_file(
        token=_UpperCamelCase, path_or_fileobj=str(_UpperCamelCase ), path_in_repo="data.zip", repo_id=_UpperCamelCase, repo_type="dataset", )
    yield repo_id
    try:
        hf_api.delete_repo(_UpperCamelCase, token=_UpperCamelCase, repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str, _UpperCamelCase : List[Any], _UpperCamelCase : List[Any] ) -> List[Any]:
    """Per-test accessor for the zipped-image repo (same damage as above)."""
    return hf_private_dataset_repo_zipped_img_data_
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# NOTE(review): the mangled source bound both halves of the import structure
# to throwaway names, never defined ``_import_structure`` (which the
# _LazyModule call at the bottom requires), and discarded the _LazyModule
# instead of installing it in sys.modules.  Restored the canonical
# transformers lazy-import pattern.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# The modeling classes are only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
    """Print every permutation of the given sequence via backtracking.

    NOTE(review): ``create_state_space_tree`` is not defined under that name
    in this file — the recursive helper below was renamed by the code
    mangling — so this call raises NameError as written.
    """
    create_state_space_tree(_UpperCamelCase, [], 0, [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Backtracking state-space walk printing every permutation of
    ``sequence``.

    NOTE(review): the original declared four parameters with one shared name
    (a SyntaxError) while the body read ``sequence``/``current_sequence``/
    ``index``/``index_used`` — names restored from those reads.  The
    recursive call also targeted the helper's pre-mangling name; it now
    recurses on itself.
    """
    if index == len(sequence):
        # A full permutation has been built.
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            SCREAMING_SNAKE_CASE__(sequence, current_sequence, index + 1, index_used)
            # Undo the choice before trying the next candidate.
            current_sequence.pop()
            index_used[i] = False
# NOTE(review): both lists were bound to the same mangled name while the
# calls on the following lines read ``sequence``/``sequence_a``; names
# restored from those reads.  ``generate_all_permutations`` itself remains
# unresolved here because the wrapper above was renamed by the mangling.
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary search on a sorted list ``v``: return the smallest index in
    (l, r] whose value is >= ``key``, assuming the invariant v[r] >= key
    holds on entry (callers pass l = -1 as the open lower bound).

    NOTE(review): the original declared four parameters with one shared name
    (a SyntaxError) while the body read ``v``/``l``/``r``/``key``/``m`` —
    names restored from those reads.
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def SCREAMING_SNAKE_CASE__(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of ``v`` in
    O(n log n) via patience sorting: ``tail[k]`` holds the smallest possible
    tail of an increasing subsequence of length k + 1.

    NOTE(review): the original bound ``tail``/``length`` to one mangled name
    (NameError on the later reads) and its else-branch lost the binary
    search that finds the slot to overwrite; restored using the stdlib
    ``bisect_left`` with an explicit upper bound, which matches the
    accompanying binary-search helper in this file.
    """
    from bisect import bisect_left

    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: best tail for length-1 subsequences.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Overwrite the first tail >= v[i], keeping tail[:length] sorted.
            tail[bisect_left(tail, v[i], 0, length)] = v[i]
    return length
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# NOTE(review): same damage as the sibling __init__ — the import structure
# was bound to throwaway names, ``_import_structure`` (required by the
# _LazyModule call below) was never defined, and the _LazyModule result was
# discarded instead of installed in sys.modules.  Restored the canonical
# transformers lazy-import pattern.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# The modeling classes are only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=SCREAMING_SNAKE_CASE__):
    """Import-time placeholder installed when ``torch``/``scipy`` are not
    available: any instantiation or classmethod call raises a helpful error
    via ``requires_backends`` instead of failing on the missing import.

    NOTE(review): the original methods declared ``*a_, **a_`` — a duplicate
    argument name, which is a SyntaxError — restored to ``*args, **kwargs``.
    """

    # Backends this dummy stands in for.
    _lowerCamelCase : str = ['torch', 'scipy']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def lowercase_(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    # NOTE(review): the mangling collapsed two distinct classmethods
    # (presumably ``from_config``/``from_pretrained`` — confirm upstream)
    # onto one name; this second definition shadows the first.
    @classmethod
    def lowercase_(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 31
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the mangled source bound every constant below to the same
# name (``__lowercase``); restored the identifiers the tokenizer class in
# this file actually references (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, SPIECE_UNDERLINE, logger).
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """SentencePiece-based tokenizer (ALBERT family).

    NOTE(review): this block was damaged by automated renaming — method
    parameters became ``a_`` (duplicate parameter names are a SyntaxError)
    and local binding targets became ``UpperCamelCase__``, so later reads of
    the original names fail.  Code is kept byte-for-byte; only
    comments/docstrings were added.
    """

    # NOTE(review): three distinct class attributes (presumably
    # vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes)
    # all share one name here; each assignment shadows the previous one.
    _lowerCamelCase : Any = VOCAB_FILES_NAMES
    _lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : Optional[Any], a_ : List[Any], a_ : str=True, a_ : Union[str, Any]=True, a_ : Tuple=False, a_ : Any="[CLS]", a_ : Optional[Any]="[SEP]", a_ : str="<unk>", a_ : List[str]="[SEP]", a_ : str="<pad>", a_ : List[Any]="[CLS]", a_ : Dict="[MASK]", a_ : Optional[Dict[str, Any]] = None, **a_ : Optional[Any], ):
        """Load the SentencePiece model and configure casing/whitespace/accent
        normalisation options."""
        # Mask token behaves like a normal word, i.e. includes the space before it.
        UpperCamelCase__ = (
            AddedToken(a_, lstrip=a_, rstrip=a_, normalized=a_ )
            if isinstance(a_, a_ )
            else mask_token
        )
        UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_, remove_space=a_, keep_accents=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, )
        UpperCamelCase__ = do_lower_case
        UpperCamelCase__ = remove_space
        UpperCamelCase__ = keep_accents
        UpperCamelCase__ = vocab_file
        UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )

    @property
    def lowercase_ ( self : Dict ):
        """Vocabulary size of the SentencePiece model."""
        return len(self.sp_model )

    def lowercase_ ( self : str ):
        """Return the full vocab (token -> id), including added tokens."""
        UpperCamelCase__ = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Any ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        UpperCamelCase__ = self.__dict__.copy()
        UpperCamelCase__ = None
        return state

    def __setstate__( self : Optional[Any], a_ : List[str] ):
        """Recreate the SentencePiece processor after unpickling."""
        UpperCamelCase__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            UpperCamelCase__ = {}
        UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase_ ( self : List[Any], a_ : Optional[Any] ):
        """Normalise raw text: collapse whitespace, unify quotes, optionally
        strip accents and lowercase."""
        if self.remove_space:
            UpperCamelCase__ = " ".join(inputs.strip().split() )
        else:
            UpperCamelCase__ = inputs
        UpperCamelCase__ = outputs.replace("``", "\"" ).replace("''", "\"" )
        if not self.keep_accents:
            UpperCamelCase__ = unicodedata.normalize("NFKD", a_ )
            UpperCamelCase__ = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            UpperCamelCase__ = outputs.lower()
        return outputs

    def lowercase_ ( self : Any, a_ : str ):
        """Tokenize with SentencePiece, splitting a trailing digit-comma
        combination (e.g. "9,") so the comma becomes its own piece."""
        UpperCamelCase__ = self.preprocess_text(a_ )
        UpperCamelCase__ = self.sp_model.encode(a_, out_type=a_ )
        UpperCamelCase__ = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                UpperCamelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_, "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        UpperCamelCase__ = cur_pieces[1:]
                    else:
                        UpperCamelCase__ = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces

    def lowercase_ ( self : Any, a_ : Optional[Any] ):
        """Token string -> id via the SentencePiece model."""
        return self.sp_model.PieceToId(a_ )

    def lowercase_ ( self : Optional[Any], a_ : Optional[Any] ):
        """Token id -> token string via the SentencePiece model."""
        return self.sp_model.IdToPiece(a_ )

    def lowercase_ ( self : Optional[int], a_ : List[str] ):
        """Decode a token list back to text, keeping special tokens out of the
        SentencePiece decode calls."""
        UpperCamelCase__ = []
        UpperCamelCase__ = ""
        UpperCamelCase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(a_ ) + token
                UpperCamelCase__ = True
                UpperCamelCase__ = []
            else:
                current_sub_tokens.append(a_ )
                UpperCamelCase__ = False
        out_string += self.sp_model.decode(a_ )
        return out_string.strip()

    def lowercase_ ( self : Tuple, a_ : List[int], a_ : Optional[List[int]] = None ):
        """Add special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase_ ( self : str, a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ):
        """Mask marking special-token (1) vs sequence-token (0) positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
        return [1] + ([0] * len(a_ )) + [1]

    def lowercase_ ( self : int, a_ : List[int], a_ : Optional[List[int]] = None ):
        """Segment ids: 0 for the first sequence (with its specials), 1 for
        the second."""
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase_ ( self : Optional[Any], a_ : str, a_ : Optional[str] = None ):
        """Copy (or serialize) the SentencePiece model file into a directory."""
        if not os.path.isdir(a_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCamelCase__ = os.path.join(
            a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_, "wb" ) as fi:
                UpperCamelCase__ = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants below were bound to the same mangled
# name — presumably ``logger`` and the pretrained-config archive map in the
# original source; confirm against upstream before relying on either.
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase(SCREAMING_SNAKE_CASE__):
    """Configuration class for GPT-NeoX-Japanese models.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``a_`` (duplicate parameter names — a SyntaxError) and bound every
    attribute to one mangled local.  Parameter names were restored from the
    right-hand sides of the attribute assignments; their order and defaults
    follow the original signature.
    """

    # Model-type identifier (originally the ``model_type`` class attribute;
    # the attribute name itself was destroyed by the mangling).
    _lowerCamelCase : Any = 'gpt_neox_japanese'

    def __init__(
        self,
        vocab_size=3_2000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=1_0000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=3_1996,
        eos_token_id=3_1999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 31
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase: Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Image processor (ConvNeXt-style): resize with optional crop-pct center
    crop, rescale, and normalize a batch of images.

    NOTE(review): the obfuscation gave every method duplicate ``a_``
    parameters (a SyntaxError) and body references to names (``size``,
    ``crop_pct`` ...) that are no longer bound; original parameter names must
    be restored before this class can run.
    """
    _lowerCamelCase : Any = ['pixel_values']
    def __init__( self : Union[str, Any], a_ : bool = True, a_ : Dict[str, int] = None, a_ : float = None, a_ : PILImageResampling = PILImageResampling.BILINEAR, a_ : bool = True, a_ : Union[int, float] = 1 / 255, a_ : bool = True, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, **a_ : List[str], ):
        """Record the default transform settings (resize/rescale/normalize)."""
        super().__init__(**a_ )
        UpperCamelCase__ = size if size is not None else {"shortest_edge": 384}
        UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
        UpperCamelCase__ = do_resize
        UpperCamelCase__ = size
        # Default value set here for backwards compatibility where the value in config is None
        UpperCamelCase__ = crop_pct if crop_pct is not None else 224 / 256
        UpperCamelCase__ = resample
        UpperCamelCase__ = do_rescale
        UpperCamelCase__ = rescale_factor
        UpperCamelCase__ = do_normalize
        UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def lowercase_ ( self : Optional[Any], a_ : np.ndarray, a_ : Dict[str, int], a_ : float, a_ : PILImageResampling = PILImageResampling.BICUBIC, a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Any, ):
        """Resize an image; below 384px the shortest edge is enlarged by
        1/crop_pct and then center-cropped back to ``shortest_edge``."""
        UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
        if "shortest_edge" not in size:
            raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        UpperCamelCase__ = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            UpperCamelCase__ = int(shortest_edge / crop_pct )
            UpperCamelCase__ = get_resize_output_image_size(a_, size=a_, default_to_square=a_ )
            UpperCamelCase__ = resize(image=a_, size=a_, resample=a_, data_format=a_, **a_ )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=a_, size=(shortest_edge, shortest_edge), data_format=a_, **a_ )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                a_, size=(shortest_edge, shortest_edge), resample=a_, data_format=a_, **a_ )
    def lowercase_ ( self : Union[str, Any], a_ : np.ndarray, a_ : Union[int, float], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Tuple, ):
        """Multiply pixel values by a scale factor (e.g. 1/255)."""
        return rescale(a_, scale=a_, data_format=a_, **a_ )
    def lowercase_ ( self : str, a_ : np.ndarray, a_ : Union[float, List[float]], a_ : Union[float, List[float]], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : List[str], ):
        """Normalize an image with per-channel mean and std."""
        return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
    def lowercase_ ( self : str, a_ : ImageInput, a_ : bool = None, a_ : Dict[str, int] = None, a_ : float = None, a_ : PILImageResampling = None, a_ : bool = None, a_ : float = None, a_ : bool = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[str, TensorType]] = None, a_ : ChannelDimension = ChannelDimension.FIRST, **a_ : Optional[Any], ):
        """Preprocess a batch: validate inputs, then resize/rescale/normalize
        and pack into a ``BatchFeature``; per-call arguments override the
        instance defaults."""
        UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct
        UpperCamelCase__ = resample if resample is not None else self.resample
        UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
        UpperCamelCase__ = image_std if image_std is not None else self.image_std
        UpperCamelCase__ = size if size is not None else self.size
        UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
        UpperCamelCase__ = make_list_of_images(a_ )
        if not valid_images(a_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # NOTE(review): operator-precedence bug — parses as
        # `(do_resize and size is None) or resample is None`; intended check is
        # `do_resize and (size is None or resample is None)`.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        UpperCamelCase__ = [to_numpy_array(a_ ) for image in images]
        if do_resize:
            UpperCamelCase__ = [self.resize(image=a_, size=a_, crop_pct=a_, resample=a_ ) for image in images]
        if do_rescale:
            UpperCamelCase__ = [self.rescale(image=a_, scale=a_ ) for image in images]
        if do_normalize:
            UpperCamelCase__ = [self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
        UpperCamelCase__ = [to_channel_dimension_format(a_, a_ ) for image in images]
        UpperCamelCase__ = {"pixel_values": images}
        return BatchFeature(data=a_, tensor_type=a_ )
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density : float , bulk_modulus : float ) -> float:
    """Return the speed of sound in a fluid (Newton-Laplace: c = sqrt(K / rho)).

    Args:
        density: fluid density rho in kg/m^3, must be > 0.
        bulk_modulus: bulk modulus K in Pa, must be > 0.

    Returns:
        Speed of sound in m/s.

    Raises:
        ValueError: if density or bulk_modulus is not strictly positive.
    """
    # Fix: the obfuscated signature declared `_UpperCamelCase` twice (a
    # SyntaxError) while the body referenced `density`/`bulk_modulus`;
    # restore the real parameter names.
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowercase: List[str] = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase ( unittest.TestCase):
    """Pipeline tests for zero-shot text classification (PT and TF backends).

    NOTE(review): obfuscation destroyed the local names — methods reference
    ``classifier``/``outputs``/``zero_shot_classifier``/``original_labelaid``
    that are never bound, and several defs repeat the parameter name ``a_``
    (a SyntaxError). The original identifiers must be restored to run.
    """
    _lowerCamelCase : int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _lowerCamelCase : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # LayoutLM-style configs need different inputs, so they are filtered out.
    if model_mapping is not None:
        _lowerCamelCase : Dict = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _lowerCamelCase : str = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def lowercase_ ( self : str, a_ : Dict, a_ : Any, a_ : int ):
        """Build a pipeline plus sample inputs for the common pipeline tests."""
        UpperCamelCase__ = ZeroShotClassificationPipeline(
            model=a_, tokenizer=a_, candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def lowercase_ ( self : Tuple, a_ : int, a_ : Any ):
        """Exercise the classifier call surface: label formats, templates,
        batching, and the invalid-argument error paths."""
        UpperCamelCase__ = classifier("Who are you voting for in 2020?", candidate_labels="politics" )
        self.assertEqual(a_, {"sequence": ANY(a_ ), "labels": [ANY(a_ )], "scores": [ANY(a_ )]} )
        # No kwarg
        UpperCamelCase__ = classifier("Who are you voting for in 2020?", ["politics"] )
        self.assertEqual(a_, {"sequence": ANY(a_ ), "labels": [ANY(a_ )], "scores": [ANY(a_ )]} )
        UpperCamelCase__ = classifier("Who are you voting for in 2020?", candidate_labels=["politics"] )
        self.assertEqual(a_, {"sequence": ANY(a_ ), "labels": [ANY(a_ )], "scores": [ANY(a_ )]} )
        UpperCamelCase__ = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health" )
        self.assertEqual(
            a_, {"sequence": ANY(a_ ), "labels": [ANY(a_ ), ANY(a_ )], "scores": [ANY(a_ ), ANY(a_ )]} )
        # Scores over candidate labels must form a probability distribution.
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ), 1.0 )
        UpperCamelCase__ = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"] )
        self.assertEqual(
            a_, {"sequence": ANY(a_ ), "labels": [ANY(a_ ), ANY(a_ )], "scores": [ANY(a_ ), ANY(a_ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ), 1.0 )
        UpperCamelCase__ = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}" )
        self.assertEqual(a_, {"sequence": ANY(a_ ), "labels": [ANY(a_ )], "scores": [ANY(a_ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        UpperCamelCase__ = classifier(["I am happy"], ["positive", "negative"] )
        self.assertEqual(
            a_, [
                {"sequence": ANY(a_ ), "labels": [ANY(a_ ), ANY(a_ )], "scores": [ANY(a_ ), ANY(a_ )]}
                for i in range(1 )
            ], )
        UpperCamelCase__ = classifier(["I am happy", "I am sad"], ["positive", "negative"] )
        self.assertEqual(
            a_, [
                {"sequence": ANY(a_ ), "labels": [ANY(a_ ), ANY(a_ )], "scores": [ANY(a_ ), ANY(a_ )]}
                for i in range(2 )
            ], )
        # Invalid inputs must raise rather than silently degrade.
        with self.assertRaises(a_ ):
            classifier("", candidate_labels="politics" )
        with self.assertRaises(a_ ):
            classifier(a_, candidate_labels="politics" )
        with self.assertRaises(a_ ):
            classifier("Who are you voting for in 2020?", candidate_labels="" )
        with self.assertRaises(a_ ):
            classifier("Who are you voting for in 2020?", candidate_labels=a_ )
        with self.assertRaises(a_ ):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
        with self.assertRaises(a_ ):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=a_, )
        self.run_entailment_id(a_ )
    def lowercase_ ( self : List[str], a_ : Pipeline ):
        """Check entailment-id resolution for various label2id layouts, then
        restore the original mapping."""
        UpperCamelCase__ = zero_shot_classifier.model.config
        UpperCamelCase__ = config.labelaid
        UpperCamelCase__ = zero_shot_classifier.entailment_id
        UpperCamelCase__ = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1 )
        UpperCamelCase__ = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )
        UpperCamelCase__ = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )
        UpperCamelCase__ = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2 )
        UpperCamelCase__ = original_labelaid
        self.assertEqual(a_, zero_shot_classifier.entailment_id )
    @require_torch
    def lowercase_ ( self : str ):
        """Regression check: very long truncated inputs must not crash (PT)."""
        UpperCamelCase__ = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"] )
    @require_torch
    def lowercase_ ( self : Tuple ):
        """Small-model smoke test (PT): tiny model yields uniform scores."""
        UpperCamelCase__ = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        UpperCamelCase__ = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(a_ ), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )
    @require_tf
    def lowercase_ ( self : Optional[Any] ):
        """Small-model smoke test (TF): tiny model yields uniform scores."""
        UpperCamelCase__ = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        UpperCamelCase__ = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(a_ ), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )
    @slow
    @require_torch
    def lowercase_ ( self : Dict ):
        """Full-model accuracy test (PT, roberta-large-mnli), incl. multi-label."""
        UpperCamelCase__ = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt" )
        UpperCamelCase__ = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(a_ ), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        UpperCamelCase__ = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=a_, )
        self.assertEqual(
            nested_simplify(a_ ), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )
    @slow
    @require_tf
    def lowercase_ ( self : Any ):
        """Full-model accuracy test (TF, roberta-large-mnli), incl. multi-label."""
        UpperCamelCase__ = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf" )
        UpperCamelCase__ = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(a_ ), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        UpperCamelCase__ = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=a_, )
        self.assertEqual(
            nested_simplify(a_ ), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    """Return the first *nth_term* terms of the P-Series 1 + 1/2^p + 1/3^p + ...

    Args:
        nth_term: number of terms to generate; an empty string yields [""].
        power: the exponent p applied to each denominator.

    Returns:
        Terms rendered as strings, e.g. ["1", "1 / 4", "1 / 9"] for p = 2.
    """
    # Fix: the obfuscated signature declared `_UpperCamelCase` twice (a
    # SyntaxError); restore the `nth_term` / `power` names the body uses.
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    # The first term is always "1"; every later term is 1 / (k ** power).
    for temp in range(int(nth_term ) ):
        series.append(F'1 / {pow(temp + 1 , int(power ) )}' if series else "1" )
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo: read n and p, then print the first n P-Series terms.
    __lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
    __lowercase: Optional[int] = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    # NOTE(review): obfuscation collapsed both inputs into `__lowercase`;
    # `nth_term` / `power` below are undefined as written.
    print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """Minimax over a complete binary tree whose leaves are *scores*.

    Args:
        depth: current level (root = 0); must be non-negative.
        node_index: index of the current node within its level.
        is_max: True when the maximising player moves at this level.
        scores: leaf values; must be non-empty (len should be 2**height).
        height: depth at which the leaves sit.

    Returns:
        The game value at the given node with optimal play by both sides.

    Raises:
        ValueError: on negative depth or empty scores.
    """
    # Fixes: the obfuscated signature declared `_UpperCamelCase` five times
    # (a SyntaxError), the recursion called an undefined name `minimax`, and
    # the False/True player-alternation arguments were collapsed away.
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    # The two players alternate: a max level is followed by a min level.
    if is_max:
        return max(
            SCREAMING_SNAKE_CASE__(depth + 1 , node_index * 2 , False , scores , height ) ,
            SCREAMING_SNAKE_CASE__(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        SCREAMING_SNAKE_CASE__(depth + 1 , node_index * 2 , True , scores , height ) ,
        SCREAMING_SNAKE_CASE__(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def SCREAMING_SNAKE_CASE__( ) -> None:
    """Demo: run minimax on eight sample leaf scores and print the result.

    NOTE(review): obfuscation collapsed the locals (`scores`, `height`) into
    one name and the call arguments into `_UpperCamelCase`, all undefined as
    written; the original names must be restored.
    """
    UpperCamelCase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    # Tree height = log2(number of leaves).
    UpperCamelCase__ = math.log(len(_UpperCamelCase ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Audio-classification task template: maps dataset columns to the
    `audio` input and `labels` ClassLabel output expected by the task.

    NOTE(review): the obfuscation collapsed several distinct field names into
    `_lowerCamelCase` and the locals below into `UpperCamelCase__`; as
    written, `task_template`/`label_schema` are undefined.
    """
    _lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
    _lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
    _lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
    _lowerCamelCase : str = "audio"
    _lowerCamelCase : str = "labels"
    def lowercase_ ( self : str, a_ : Union[str, Any] ):
        """Return a copy of this template with the label schema aligned to the
        dataset's actual ClassLabel feature; validates the label column."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], a_ ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        UpperCamelCase__ = copy.deepcopy(self )
        UpperCamelCase__ = self.label_schema.copy()
        UpperCamelCase__ = features[self.label_column]
        UpperCamelCase__ = label_schema
        return task_template
    @property
    def lowercase_ ( self : Any ):
        """Mapping from dataset column names to the task's canonical names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 31
| 0
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: List[Any] = logging.get_logger()
@dataclass
class UpperCAmelCase :
    """Forward-hook tracer: runs one forward pass and records the leaf
    modules (those with no submodules) that were executed.

    NOTE(review): the three dataclass fields all share the obfuscated name
    `_lowerCamelCase` (invalid), the hook body assigns to `UpperCamelCase__`
    but tests `has_not_submodules`, and the final lambda takes `a_` while its
    body uses `x` — original names must be restored before this runs.
    """
    _lowerCamelCase : nn.Module
    _lowerCamelCase : List[nn.Module] = field(default_factory=SCREAMING_SNAKE_CASE__)
    _lowerCamelCase : list = field(default_factory=SCREAMING_SNAKE_CASE__)
    def lowercase_ ( self : Union[str, Any], a_ : List[Any], a_ : Tensor, a_ : Tensor ):
        """Hook body: record the module when it is a leaf (or conv/batchnorm)."""
        UpperCamelCase__ = len(list(m.modules() ) ) == 1 or isinstance(a_, nn.Convad ) or isinstance(a_, nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(a_ )
    def __call__( self : Union[str, Any], a_ : Tensor ):
        """Install hooks on every submodule, run the forward pass, then
        detach all hooks; returns self for chaining."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(a_ )
        [x.remove() for x in self.handles]
        return self
    @property
    def lowercase_ ( self : List[Any] ):
        """Traced leaf modules that actually hold parameters/buffers."""
        return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class UpperCAmelCase :
    """Copies weights operation-by-operation from a source module to a
    destination module by tracing both with the same input.

    NOTE(review): obfuscation gave several fields the same name
    (`_lowerCamelCase`, `_lowerCamelCase : List` twice) and collapsed the
    traced-op locals into `UpperCamelCase__`; as written the zip below pairs
    ambiguous lists. Original names (`src`, `dest`, skip lists) must be
    restored.
    """
    _lowerCamelCase : nn.Module
    _lowerCamelCase : nn.Module
    _lowerCamelCase : int = 0
    _lowerCamelCase : List = field(default_factory=SCREAMING_SNAKE_CASE__)
    _lowerCamelCase : List = field(default_factory=SCREAMING_SNAKE_CASE__)
    def __call__( self : str, a_ : Tensor ):
        """Trace src and dest with the given input, filter skipped op types,
        and load each src op's state dict into the matching dest op."""
        UpperCamelCase__ = Tracker(self.dest )(a_ ).parametrized
        UpperCamelCase__ = Tracker(self.src )(a_ ).parametrized
        UpperCamelCase__ = list(filter(lambda a_ : type(a_ ) not in self.src_skip, a_ ) )
        UpperCamelCase__ = list(filter(lambda a_ : type(a_ ) not in self.dest_skip, a_ ) )
        # Mismatched operation counts mean the architectures don't line up.
        if len(a_ ) != len(a_ ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(a_ )} operations while'
                f' destination module has {len(a_ )}.' )
        for dest_m, src_m in zip(a_, a_ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : ResNetConfig , _UpperCamelCase : Path , _UpperCamelCase : bool = True ) -> Dict:
    '''Convert one timm ResNet checkpoint into the HF format, verify the
    logits match, and optionally push model + image processor to the Hub.

    NOTE(review): the signature repeats `_UpperCamelCase` (SyntaxError) and
    the locals were collapsed into `UpperCamelCase__`; `name`,
    `save_directory`, `checkpoint_name`, `from_model`, `our_model` are
    undefined as written and need their original names restored.
    '''
    print(F'Converting {name}...' )
    with torch.no_grad():
        # Source: pretrained timm model; target: freshly built HF model.
        UpperCamelCase__ = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase ).eval()
        UpperCamelCase__ = ResNetForImageClassification(_UpperCamelCase ).eval()
        UpperCamelCase__ = ModuleTransfer(src=_UpperCamelCase , dest=_UpperCamelCase )
        UpperCamelCase__ = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(_UpperCamelCase )
    # Sanity check: transferred weights must reproduce the source logits.
    assert torch.allclose(from_model(_UpperCamelCase ) , our_model(_UpperCamelCase ).logits ), "The model logits don't match the original one."
    UpperCamelCase__ = F'resnet{"-".join(name.split("resnet" ) )}'
    print(_UpperCamelCase )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_UpperCamelCase , )
        # we can use the convnext one
        UpperCamelCase__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_UpperCamelCase , )
        print(F'Pushed {checkpoint_name}' )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Path , _UpperCamelCase : str = None , _UpperCamelCase : bool = True ) -> int:
    '''Build ImageNet-1k ResNet configs and convert one (or all) variants.

    NOTE(review): the signature repeats `_UpperCamelCase` (SyntaxError) and
    obfuscation collapsed the locals; `num_labels`, `idalabel`,
    `ImageNetPreTrainedConfig`, `names_to_config`, `model_name`,
    `expected_shape`, `config` are undefined/ambiguous as written.
    '''
    UpperCamelCase__ = "imagenet-1k-id2label.json"
    UpperCamelCase__ = 10_00
    UpperCamelCase__ = (1, num_labels)
    UpperCamelCase__ = "huggingface/label-files"
    UpperCamelCase__ = num_labels
    # id2label map downloaded from the HF label-files dataset.
    UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
    UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
    UpperCamelCase__ = idalabel
    UpperCamelCase__ = {v: k for k, v in idalabel.items()}
    # Partial config constructor pre-loaded with the ImageNet label maps.
    UpperCamelCase__ = partial(_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase )
    UpperCamelCase__ = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
    }
    if model_name:
        # Convert only the requested variant...
        convert_weight_and_push(_UpperCamelCase , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase )
    else:
        # ...or every variant in the table.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: parse arguments, create the output directory, convert.
    __lowercase: List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    # NOTE(review): obfuscation collapsed `parser`/`args` into `__lowercase`;
    # the names used below are undefined as written.
    __lowercase: Any = parser.parse_args()
    __lowercase: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( shape , scale=1.0 , rng=None , name=None ):
    """Build a nested list of random floats with the given 2-D shape.

    Args:
        shape: (num_rows, num_values) pair.
        scale: multiplier applied to every random value.
        rng: optional ``random.Random``; falls back to the module-level
            ``global_rng`` when omitted.
        name: unused; kept for signature compatibility.

    Returns:
        A list of ``shape[0]`` lists, each with ``shape[1]`` floats.
    """
    # Fix: the obfuscated signature declared `_UpperCamelCase` four times
    # (a SyntaxError) with bogus annotations; restore the real parameter
    # names the body uses (`shape`, `scale`, `rng`).
    if rng is None:
        rng = global_rng
    values = []
    for _ in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
    """Test helper that fabricates speech inputs and the kwargs for a
    Speech2Text feature extractor.

    NOTE(review): `__init__` repeats the parameter name `a_` (a SyntaxError)
    and the right-hand-side names below (`parent`, `batch_size`, ...) are
    unbound as written; original parameter names must be restored.
    """
    def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
        """Record the generation parameters for the fabricated inputs."""
        UpperCamelCase__ = parent
        UpperCamelCase__ = batch_size
        UpperCamelCase__ = min_seq_length
        UpperCamelCase__ = max_seq_length
        # Step between consecutive input lengths so sizes strictly increase.
        UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCamelCase__ = feature_size
        UpperCamelCase__ = num_mel_bins
        UpperCamelCase__ = padding_value
        UpperCamelCase__ = sampling_rate
        UpperCamelCase__ = return_attention_mask
        UpperCamelCase__ = do_normalize
    def lowercase_ ( self : Tuple ):
        """Kwargs dict for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
        """Fabricate a batch of speech inputs (optionally equal-length and/or
        converted to numpy arrays)."""
        def _flatten(a_ : Dict ):
            return list(itertools.chain(*a_ ) )

        if equal_length:
            UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            UpperCamelCase__ = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
    def lowercase_ ( self : Any ):
        """Create the shared input-fabrication helper for the tests below.

        NOTE(review): the obfuscated assignment target should be
        ``self.feat_extract_tester``.
        """
        UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
    def lowercase_ ( self : Optional[int], a_ : Tuple ):
        """Assert the given features are normalized: per-feature mean ~ 0 and
        variance ~ 1 (within 1e-3)."""
        self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
    def lowercase_ ( self : Any ):
        """Feature extraction call surface: batched vs. unbatched inputs and
        list vs. numpy inputs must produce matching features.

        NOTE(review): obfuscation collapsed the distinct locals
        (``feature_extractor``, ``speech_inputs``, ``np_speech_inputs``,
        encoded pairs) into ``UpperCamelCase__``/``a_``; the comparisons
        below are ambiguous as written.
        """
        UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
        # Test feature size
        UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
        # Test not batched input
        UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
        UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
        self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
        # Test batched
        UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
        UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
        for enc_seq_a, enc_seq_a in zip(a_, a_ ):
            self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        UpperCamelCase__ = np.asarray(a_ )
        UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
        UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
        for enc_seq_a, enc_seq_a in zip(a_, a_ ):
            self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word too (PascalCase).

    Returns:
        The converted string.

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` is not a bool.

    >>> SCREAMING_SNAKE_CASE__("some_random_string")
    'someRandomString'
    >>> SCREAMING_SNAKE_CASE__("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    # The original definition reused one parameter name for both arguments
    # (a SyntaxError) and validated with isinstance(x, x); fixed below.
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    # Skip empty segments (leading/double underscores) so word[0] cannot
    # raise IndexError.
    capitalized_words = [word[0].upper() + word[1:] for word in words[start_index:] if word]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__(_UpperCamelCase: np.ndarray) -> np.ndarray:
    """Reshape the given array into a single column vector of shape (size, 1)."""
    return np.reshape(_UpperCamelCase, (_UpperCamelCase.size, 1))
def SCREAMING_SNAKE_CASE__(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class covariance (pooled scatter) matrix.

    Args:
        features: (n_features, n_samples) data matrix.
        labels:   per-sample integer class label in ``range(classes)``.
        classes:  number of distinct classes.

    Returns:
        Sum over classes of the class-centered scatter, divided by n_samples.
    """
    # The original used duplicate parameter names (a SyntaxError) and an
    # np.nan sentinel with an index check; use None as the "not started" flag.
    covariance_sum = None
    for i in range(classes):
        data = features[:, labels == i]
        # Centralize the data of class i around its own mean (column vector).
        centered_data = data - data.mean(axis=1, keepdims=True)
        if covariance_sum is None:
            covariance_sum = np.dot(centered_data, centered_data.T)
        else:
            covariance_sum += np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class covariance (scatter) matrix.

    Each class contributes ``n_i * (mu_i - mu)(mu_i - mu)^T`` where ``mu`` is
    the global mean and ``mu_i``/``n_i`` are the class mean and class size.

    Args:
        features: (n_features, n_samples) data matrix.
        labels:   per-sample integer class label in ``range(classes)``.
        classes:  number of distinct classes.

    Returns:
        The weighted between-class scatter divided by n_samples.
    """
    general_mean = features.mean(axis=1, keepdims=True)
    covariance_sum = None
    for i in range(classes):
        data = features[:, labels == i]
        class_size = data.shape[1]
        deviation = data.mean(axis=1, keepdims=True) - general_mean
        if covariance_sum is None:
            covariance_sum = class_size * np.dot(deviation, deviation.T)
        else:
            covariance_sum += class_size * np.dot(deviation, deviation.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* onto its top *dimensions* principal components.

    Args:
        features:   (n_features, n_samples) data matrix.
        dimensions: number of principal components to keep.

    Returns:
        (dimensions, n_samples) projected data.

    Raises:
        AssertionError: if the dataset is empty (kept for compatibility with
            the tests below, which expect AssertionError).
    """
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # eigh returns eigenvalues ascending: reverse columns, keep the top ones.
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
    logging.error("Dataset empty")
    raise AssertionError
def SCREAMING_SNAKE_CASE__(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    """Fisher linear discriminant analysis: project *features* onto the
    *dimensions* most class-discriminative directions.

    Args:
        features:   (n_features, n_samples) data matrix.
        labels:     per-sample integer class label in ``range(classes)``.
        classes:    number of distinct classes.
        dimensions: number of output directions (must be < classes).

    Raises:
        AssertionError: if ``dimensions >= classes`` or the dataset is empty.
    """
    # LDA yields at most (classes - 1) meaningful directions.
    assert classes > dimensions
    # Check if features have been already loaded.
    # Bug fix: ``features.any`` (no call) is always truthy; call the method.
    if features.any():
        # NOTE(review): the covariance helpers are expected at module level as
        # covariance_between_classes / covariance_within_classes — in this dump
        # their definitions were renamed; restore the names for this to resolve.
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
    logging.error("Dataset empty")
    raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
    """Check that linear_discriminant_analysis raises AssertionError when asked
    for as many output dimensions as there are classes.

    NOTE(review): an automated rename collapsed the four distinct locals
    (features, labels, classes, dimensions) into one name and replaced the
    call arguments with the undefined name ``_UpperCamelCase`` — as written
    the call raises NameError, not the intended AssertionError. Restore the
    distinct names to repair this test.
    """
    # features matrix: 3 feature rows x 5 sample columns
    UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    # per-sample class labels
    UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
    # classes
    UpperCamelCase__ = 2
    # dimensions (== classes, which must be rejected by the assertion)
    UpperCamelCase__ = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(_UpperCamelCase ) as error_info:
        UpperCamelCase__ = linear_discriminant_analysis(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        if isinstance(_UpperCamelCase , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
    """Check principal_component_analysis output against a known projection.

    NOTE(review): the renaming pass collapsed the distinct locals (features,
    dimensions, expected_output) into one name and made the call arguments the
    undefined ``_UpperCamelCase``; the test cannot run until they are restored.
    """
    # features matrix: 3 feature rows x 3 sample columns
    UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    # number of principal components to keep
    UpperCamelCase__ = 2
    # expected projected data (digit-separator underscores are valid literals)
    UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
    with pytest.raises(_UpperCamelCase ) as error_info:
        UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
        if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCAmelCase:
    """Builds a tiny DeBERTa-v2 config plus random inputs for the TF model
    tests below (the conventional ``TFDebertaV2ModelTester`` role).

    The original definition reused the parameter name ``a_`` for every
    argument (a SyntaxError); the real names were recovered from the
    attribute assignments and from the call sites in the test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Store every knob so prepare_config_and_inputs() can build the config.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random model inputs plus a matching DebertaV2Config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Forward with dict and list inputs; check the hidden-state shape."""
        model = TFDebertaVaModel(config=config)
        dict_inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        list_inputs = [input_ids, input_mask]
        result = model(dict_inputs)
        result = model(list_inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """MLM head: logits must be (batch, seq_len, vocab_size)."""
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Token-classification head: logits must be (batch, seq_len, num_labels)."""
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """QA head: start/end logits must each be (batch, seq_len)."""
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() output to the common-test format."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """TF DeBERTa-v2 model test suite wired into the common tester mixins.

    NOTE(review): the automated rename collapsed the four distinct mixin
    attributes into ``_lowerCamelCase`` (so only the last survived) and all
    test methods into ``lowercase_`` (so they shadow each other and unittest
    cannot discover them). The attribute names below are restored from the
    values' types and the imported mixins — confirm ``test_head_masking`` /
    ``test_onnx`` against the upstream file; the ``test_*`` method names still
    need restoring.
    """

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def lowercase_(self):
        """setUp: build the model tester and a ConfigTester for DebertaV2Config."""
        # Must be stored on self: the tests below read these attributes.
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def lowercase_(self):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowercase_(self):
        """Base model forward/shape test."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def lowercase_(self):
        """Masked-LM head test."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def lowercase_(self):
        """Question-answering head test."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def lowercase_(self):
        """Sequence-classification head test."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def lowercase_(self):
        """Token-classification head test."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def lowercase_(self):
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class UpperCAmelCase(unittest.TestCase):
    """Integration test comparing TF DeBERTa-v2 outputs against reference values.

    NOTE(review): both methods are named ``lowercase_`` (rename artifact), so
    the second shadows the first; restore the original ``test_*`` names.
    """

    @unittest.skip(reason="Model not available yet")
    def lowercase_(self):
        """Placeholder until the TF checkpoint is published."""
        pass

    @slow
    def lowercase_(self):
        """Forward a fixed token sequence and compare a 3x3 output slice."""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        # The original called model(a_, attention_mask=a_) with the undefined
        # name a_; wire in the tensors built above.
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# Module-level constants for the Image feature. The automated rename had
# collapsed all three into ``__lowercase``; the names below are the ones the
# functions in this module actually reference (global declaration in
# list_image_compression_formats, and uses in encode_np_array).

# Lazily-populated cache of formats Pillow can both open and save.
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
# numpy byteorder character matching this platform's native endianness.
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase:
    """Image feature: encodes paths / bytes / numpy arrays / PIL images into an
    Arrow struct ``{bytes: binary, path: string}`` and decodes back to PIL.

    NOTE(review): the feature methods below are all named ``lowercase_``
    (automated-rename artifact), so each definition shadows the previous one;
    the original names (encode_example, decode_example, flatten, cast_storage,
    embed_storage) must be restored for the datasets framework to find them.
    """

    # Whether decode_example() should return a PIL image (True) or the raw
    # storage dict (False).
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        """Return the Arrow storage type of this feature."""
        return self.pa_type

    def lowercase_(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]):
        """Encode *value* into the storage dict ``{"bytes": ..., "path": ...}``."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )

    def lowercase_(self, value: dict, token_per_repo_id=None):
        """Decode the storage dict into a ``PIL.Image.Image``.

        ``token_per_repo_id`` maps Hub dataset repo ids to auth tokens, for
        decoding images hosted in private repositories.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    # Chained-URL paths ("zip://...::https://...") end with the origin URL.
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def lowercase_(self):
        """Flatten to non-decodable sub-features when decoding is disabled."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def lowercase_(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]):
        """Cast any supported Arrow storage layout to the Image struct type."""
        if pa.types.is_string(storage.type):
            # Paths only: add an all-null bytes column.
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            # Bytes only: add an all-null path column.
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            # Struct: fill in whichever of bytes/path is missing with nulls.
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Nested lists: treat each element as an array and encode to bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def lowercase_(self, storage: pa.StructArray):
        """Embed bytes from local files into the storage (keep only basenames)."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
    """Return (and lazily cache) the image formats Pillow can both open and save.

    NOTE(review): the cache is declared here as ``_IMAGE_COMPRESSION_FORMATS``
    but the module-level definition was renamed to ``__lowercase``, so the
    first read below raises NameError; additionally the computed list is
    assigned to a local instead of the global. Restore the module constant's
    name and assign to it here.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # Usable compression formats must be both readable (OPEN) and writable (SAVE).
        UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
    """Serialize a PIL image to bytes, keeping its original format when Pillow
    can re-encode it, else falling back to PNG (common 8-bit modes) or TIFF.

    NOTE(review): the renaming pass collapsed the locals — ``buffer`` and
    ``format`` are read below but only ``UpperCamelCase__`` is ever assigned,
    and the parameter is referenced both as ``_UpperCamelCase`` and ``image``;
    restore distinct names for this to run.
    """
    UpperCamelCase__ = BytesIO()  # in-memory buffer receiving the encoded image
    if image.format in list_image_compression_formats():
        UpperCamelCase__ = image.format
    else:
        # PNG covers the standard modes; anything else round-trips via TIFF.
        UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(_UpperCamelCase , format=_UpperCamelCase )
    return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
    """Encode a PIL image as a storage dict: reference its source file by path
    when it was loaded from disk, otherwise embed the serialized bytes.

    NOTE(review): the parameter is referenced both as ``_UpperCamelCase`` and
    as ``image`` (rename artifact); unify the name for this to run.
    """
    if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__(array: np.ndarray) -> dict:
    """Encode a numpy array as an image storage dict via Pillow.

    Downcasts the dtype to the nearest Pillow-compatible dtype of the same
    kind when needed; multi-channel arrays must fit in uint8.

    Raises:
        TypeError: when no compatible image dtype exists for *array*.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    # The original assigned every intermediate to one collapsed name, leaving
    # dtype / dtype_kind / ... unbound; restored here.
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def SCREAMING_SNAKE_CASE__(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    """Encode a homogeneous list of image objects as storage dicts, passing
    None entries through unchanged.

    The list's type is decided by its first non-null value; unsupported types
    (e.g. dicts already in storage form) are returned as-is.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 31
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase(TaskTemplate):
    """Task template for audio classification: maps a dataset's audio/label
    columns onto the canonical ``audio``/``labels`` schema.

    NOTE(review): both methods below are named ``lowercase_`` (rename
    artifact), so the property shadows the alignment method; restore the
    original names (align_with_features, column_mapping).
    """

    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def lowercase_(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        ClassLabel feature for ``self.label_column``.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ to install the new schema.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def lowercase_(self):
        """Map this template's column names to the canonical task columns."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the slow (Python) and fast (Rust) CLIP tokenizers."""

    # NOTE(review): the TokenizerTesterMixin configuration attributes and the
    # method names were restored from the mixin's API and from the references
    # inside the bodies; the original source had them all mangled to one
    # duplicated placeholder name, so the class was not runnable.
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Loading an old (pre-v4.17.0) fast tokenizer layout must raise a clear error.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower-cases letters, so the mixin's default test does not apply.
        pass
| 31
| 0
|
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase(SchedulerCommonTest):
    """Tests for KDPMaDiscreteScheduler full-loop sampling and config handling."""

    # NOTE(review): attribute, method, and local names restored from their uses
    # inside the bodies (self.scheduler_classes, self.num_inference_steps,
    # self.get_scheduler_config, ...); the original source had them mangled to
    # one duplicated placeholder name.
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger; `main()` below logs through this name. (The original
# line annotated it with `Any` without importing it, which raised at import.)
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio.

    Args:
        wav: 1-D waveform array.
        max_length: maximum clip length in seconds.
        sample_rate: sampling rate of ``wav`` in samples per second.

    Returns:
        ``wav`` unchanged when it is no longer than the target length, otherwise
        a random contiguous slice of ``round(sample_rate * max_length)`` samples.
    """
    # NOTE(review): the original definition had all three parameters mangled to
    # the same name (a SyntaxError) and a wrong `-> str` annotation; the real
    # names are fixed by the keyword call in `main()` below.
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): class and field names restored from the attribute accesses in
    `main()` (``data_args.dataset_name``, ``data_args.train_split_name``, ...);
    the original source had every field mangled to one duplicated placeholder
    and defaults pointing at an unrelated function.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we fine-tune from.

    NOTE(review): class and field names restored from the attribute accesses in
    `main()` (``model_args.model_name_or_path``, ``model_args.cache_dir``, ...);
    the original source had every field mangled to one duplicated placeholder
    and the validation hook under a name that is never invoked.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        """Validate the deprecated `freeze_feature_extractor` flag against `freeze_feature_encoder`."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    """Fine-tune a pretrained audio model for audio classification.

    Parses model/data/training arguments, loads and preprocesses the dataset,
    builds the model and Trainer, then optionally trains, evaluates, and writes
    (or pushes) a model card.
    """
    # NOTE(review): local names in this function were restored from their later
    # uses (`parser`, `training_args`, `raw_datasets["train"]`, `labels`, ...);
    # the original source had them mangled to one duplicated placeholder, and
    # `fpaa`/`labelaid`/`idalabel` were mangled attribute/keyword names.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        # Random clip to max_length_seconds, then extract model features.
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        # Full clip (no random cropping), then extract model features.
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
| 31
| 0
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """Depth-first search from `start`; returns the node count of its subtree.

    Appends `start` to the module-level `cuts` list whenever the subtree has an
    even number of nodes (the edge above `start` can then be removed while
    keeping every component even-sized). Relies on the module-level `tree`
    adjacency lists and `visited` dict.
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root (node 1) to populate `cuts`."""
    dfs(1)


if __name__ == "__main__":
    # NOTE(review): names below restored from their uses in dfs()/even_tree();
    # the original had the assignments mangled so tree/visited/cuts were unbound.
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict = {}
    cuts: list = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 359
|
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the file at `file_path` and return its content as a string of bits.

    Each byte becomes its 8-character zero-padded binary representation.
    Exits the process if the file cannot be opened.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress `data_bits` with the Lempel-Ziv-Welch algorithm and return the result.

    The lexicon starts with the single-bit codes and grows as codes are read;
    whenever its size reaches a power of two, every key is re-padded with a
    leading "0" so code widths stay in sync with the compressor.

    NOTE(review): local names (`index`, `last_match_id`, lexicon writes) were
    restored from their later uses; the original had the assignments mangled so
    the function raised NameError.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit-string `to_write` to `file_path` as raw bytes.

    The bit-string is split into 8-bit groups; a "1" followed by zeros pads the
    final group (or a whole "10000000" byte is appended when the last group is
    already full). Only the groups before the final padding group are written.
    Exits the process if the file cannot be opened.

    NOTE(review): the original definition had both parameters mangled to the
    same name (a SyntaxError); names restored from the call in the entry point.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the leading-zeros prefix (and its terminating "1") from `data_bits`.

    `counter` is the number of leading "0" characters before the first "1".
    NOTE(review): the second slice re-applies `counter + 1` to the already
    shortened string, so it drops `counter + 1` further characters — preserved
    as-is from the original; confirm against the matching compressor.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress the LZW file at `source_path` and write the result to `destination_path`.

    NOTE(review): the original definition had both parameters mangled to the
    same name (a SyntaxError) and a mangled function name; the name `compress`
    is what the __main__ guard below calls (kept from the upstream script even
    though this pipeline performs decompression).
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a column vector of shape (n, 1)."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class covariance matrix, summed over all classes.

    `features` has variables on rows and samples on columns; `labels[j]` is the
    class index of sample column j.

    NOTE(review): the original definition had all three parameters mangled to
    the same name (a SyntaxError); names and locals restored from their uses.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class covariance matrix (scatter of class means).

    Each class contributes its sample count times the outer product of the
    deviation of its mean from the overall mean.

    NOTE(review): the original definition had all three parameters mangled to
    the same name (a SyntaxError); names and locals restored from their uses.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project `features` onto `dimensions` linear discriminants.

    Solves the generalized eigenproblem of the between-class vs. within-class
    covariance matrices, then orthonormalizes the chosen directions via SVD.

    Raises:
        AssertionError: if `dimensions >= classes`, or if the dataset is empty.
    """
    assert classes > dimensions
    # Check if features have been already loaded
    # (fixed: the original tested the bound method `features.any`, which is
    # always truthy, instead of calling it)
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions is not strictly below classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
    """Check principal_component_analysis output against a known expected projection.

    Fix: the original body passed the undefined name ``_UpperCamelCase`` to
    ``pytest.raises`` and to the call under test; concrete locals and the
    expected exception type are restored.
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features, dimensions )
        if not np.allclose(expected_output, output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Unconditional latent-diffusion pipeline: DDIM denoising in VQ-VAE latent space.
    NOTE(review): this chunk is name-mangled -- ``__init__`` and ``__call__``
    declare duplicate ``a_`` parameters (a SyntaxError) and the bodies read
    names that are never bound (``latents``, ``batch_size``, ``eta``,
    ``output_type``, ``return_dict``). Code left byte-identical pending
    restoration from the upstream source.
    """
    def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
        """Register the vqvae, unet and scheduler sub-modules on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
    @torch.no_grad()
    def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
        """Draw Gaussian latents, run the DDIM denoising loop, decode via the VQ-VAE."""
        UpperCamelCase__ = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
        UpperCamelCase__ = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(a_ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase__ = {}
        if accepts_eta:
            UpperCamelCase__ = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
            # predict the noise residual
            UpperCamelCase__ = self.unet(a_, a_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
        # decode the image latents with the VAE
        UpperCamelCase__ = self.vqvae.decode(a_ ).sample
        UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
        UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase__ = self.numpy_to_pil(a_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__( func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    """Find a root of ``func`` (an expression in ``x``) with the Newton-Raphson method.

    Fix: the original declared all three parameters as ``_UpperCamelCase`` (a
    SyntaxError) and never bound the ``x`` that ``eval(func)`` reads; the
    working names are restored.

    :param func: expression in the free variable ``x``, e.g. ``"sin(x)"``
    :param a: starting guess
    :param precision: iterate until ``|func(x)| < precision``
    :return: the approximated root as a float
    NOTE(review): ``eval`` executes arbitrary code -- only pass trusted input.
    """
    x = a
    while True:
        # Newton step: x <- x - f(x) / f'(x); `diff` symbolically differentiates
        # the expression and the derivative string is evaluated at the local x.
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
    # NOTE(review): this chunk defines the solver under the mangled name
    # ``SCREAMING_SNAKE_CASE__``, so ``newton_raphson`` below is unresolved
    # here -- verify against the unmangled original.
    # Find root of trigonometric function
    # Find value of pi
    print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find Square Root of 5
    print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
    # Exponential Roots
    print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( target_runners , token ):
    """Query the GitHub API for self-hosted runner status and fail if any target runner is offline.

    Fix: the original declared both parameters as ``_UpperCamelCase`` (a
    SyntaxError) and assigned every local to ``UpperCamelCase__`` while reading
    the real names (``output``, ``status``, ``offline_runners`` ...); the
    working names are restored (the embedded f-string already read ``token``).

    :param target_runners: runner names to check
    :param token: GitHub token with ``actions:read`` permission
    :raises ValueError: listing the offline runners, if any
    """
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    # shell=True so the quoted curl command line is parsed by the shell.
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    stdout = output.stdout.decode("utf-8" )
    status = json.loads(stdout )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    # NOTE(review): mangled -- the converter below reads ``values`` but its
    # parameter is ``_UpperCamelCase`` (presumably this was ``list_str``), and
    # ``parser``/``args``/``get_runner_status`` are never bound under these
    # names in this chunk; verify against the unmangled original.
    def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
        """argparse ``type=`` converter: split a comma-separated string into a list."""
        return values.split("," )
    __lowercase: str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    __lowercase: str = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module constants below were mangled to the same name
# ``__lowercase`` (later assignments shadow earlier ones); presumably these
# were `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` -- verify upstream.
__lowercase = logging.get_logger(__name__)
# Name of the serialized SentencePiece model inside a checkpoint directory.
__lowercase = {"vocab_file": "spm_char.model"}
# Hub download locations of the vocab file per published SpeechT5 checkpoint.
__lowercase = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}
# Maximum positional-embedding size per checkpoint.
__lowercase = {
    "microsoft/speecht5_asr": 1_024,
    "microsoft/speecht5_tts": 1_024,
    "microsoft/speecht5_vc": 1_024,
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Character-level SpeechT5 tokenizer backed by a SentencePiece model.
    NOTE(review): this chunk is name-mangled -- the class attributes reference
    constants whose definitions were renamed to ``__lowercase`` above, several
    signatures declare duplicate ``a_`` parameters (a SyntaxError), bodies read
    unbound names (``sp_model_kwargs``, ``vocab_file``, ``tokens`` ...), and
    every helper method shares the name ``lowercase_`` (later defs shadow
    earlier ones). Code left byte-identical pending restoration.
    """
    _lowerCamelCase : Dict = VOCAB_FILES_NAMES
    _lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase : Tuple = ['input_ids', 'attention_mask']
    def __init__( self : Optional[int], a_ : Tuple, a_ : str="<s>", a_ : List[str]="</s>", a_ : int="<unk>", a_ : Union[str, Any]="<pad>", a_ : Optional[Dict[str, Any]] = None, **a_ : Union[str, Any], ):
        """Load the SentencePiece model and register the special tokens."""
        UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=a_, eos_token=a_, unk_token=a_, pad_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, )
        UpperCamelCase__ = vocab_file
        UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )
    @property
    def lowercase_ ( self : int ):
        """Size of the SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()
    def lowercase_ ( self : Optional[int] ):
        """Return the token -> id mapping, including added tokens."""
        UpperCamelCase__ = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Union[str, Any] ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        UpperCamelCase__ = self.__dict__.copy()
        UpperCamelCase__ = None
        return state
    def __setstate__( self : Union[str, Any], a_ : Tuple ):
        """Restore state and reload the SentencePiece model from disk."""
        UpperCamelCase__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            UpperCamelCase__ = {}
        UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def lowercase_ ( self : Tuple, a_ : str ):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(a_, out_type=a_ )
    def lowercase_ ( self : Union[str, Any], a_ : List[str] ):
        """Convert a piece (token) to its vocabulary id."""
        return self.sp_model.piece_to_id(a_ )
    def lowercase_ ( self : Optional[Any], a_ : Union[str, Any] ):
        """Convert a vocabulary id back to its piece (token)."""
        UpperCamelCase__ = self.sp_model.IdToPiece(a_ )
        return token
    def lowercase_ ( self : int, a_ : str ):
        """Join pieces back into a string, keeping special tokens verbatim."""
        UpperCamelCase__ = []
        UpperCamelCase__ = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(a_ ) + token
                UpperCamelCase__ = []
            else:
                current_sub_tokens.append(a_ )
        out_string += self.sp_model.decode(a_ )
        return out_string.strip()
    def lowercase_ ( self : Optional[int], a_ : Dict, a_ : Tuple=None ):
        """Append the EOS token (pairs are simply concatenated first)."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]
    def lowercase_ ( self : Optional[int], a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ):
        """Return a mask marking special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
        UpperCamelCase__ = [1]
        if token_ids_a is None:
            return ([0] * len(a_ )) + suffix_ones
        return ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
    def lowercase_ ( self : List[Any], a_ : str, a_ : Optional[str] = None ):
        """Serialize the SentencePiece model into the given directory."""
        if not os.path.isdir(a_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCamelCase__ = os.path.join(
            a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_, "wb" ) as fi:
                UpperCamelCase__ = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)  # module logger (name mangled)
# Type variable ranging over the two dataset flavours (map-style / iterable).
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one.
    NOTE(review): mangled -- all six parameters share the name
    ``_UpperCamelCase`` (a SyntaxError) and the body reads unbound names
    (``datasets``, ``dataset_type``, ``other_type``, ``stopping_strategy``).
    Code left byte-identical pending restoration from the upstream source.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    # Validate every element and ensure they are all of the same flavour.
    for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
            if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
        if i == 0:
            # Lock in the flavour of the first dataset; all others must match it.
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    # Dispatch to the implementation for the detected flavour.
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) along an axis.
    NOTE(review): mangled -- all four parameters share the name
    ``_UpperCamelCase`` (a SyntaxError) and the body reads unbound names
    (``dsets``, ``dataset_type``, ``other_type``). Also the messages below say
    "interleave" inside a concatenate helper -- looks copy-pasted from the
    interleave validation loop; confirm against upstream before changing
    user-facing text.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    # Validate every element and ensure they are all of the same flavour.
    for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
            if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
        if i == 0:
            # Lock in the flavour of the first dataset; all others must match it.
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    # Dispatch to the implementation for the detected flavour.
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffold for the CLIP sub-package.
# NOTE(review): mangled -- every ``_import_structure`` assignment collapsed to
# ``__lowercase`` (the extension lists below never extend the dict), so the
# ``_LazyModule(..., _import_structure, ...)`` call at the bottom references an
# undefined name as written. Code left byte-identical pending restoration.
__lowercase: str = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
# Each optional backend contributes its exports only when available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase: Dict = ["CLIPTokenizerFast"]
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase: str = ["CLIPFeatureExtractor"]
    __lowercase: str = ["CLIPImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase: List[Any] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase: int = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase: Optional[Any] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static imports for type checkers mirror the runtime structure above.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    import sys
    # At runtime the module is replaced by a lazy loader over the structure above.
    __lowercase: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
    """Return the minimum total cost of merging all files into one (optimal merge pattern).

    Each merge combines the two currently-smallest files; a merge of sizes
    a and b costs a + b and produces a file of size a + b.

    Fix: the mangled original assigned every local to ``UpperCamelCase__``
    while reading ``temp``/``min_index``/``optimal_merge_cost`` (NameError at
    runtime). The working logic is restored using a min-heap, making each
    merge O(log n) instead of the original O(n) ``list.index``/``pop`` scan,
    and the caller's list is no longer mutated.

    :param _UpperCamelCase: list of file sizes; left unmodified
    :return: minimal total merge cost (0 for zero or one file)
    """
    import heapq  # local import keeps this block self-contained

    if len(_UpperCamelCase ) < 2:
        # Nothing to merge.
        return 0
    heap = list(_UpperCamelCase )  # work on a copy so the input survives
    heapq.heapify(heap )
    optimal_merge_cost = 0
    while len(heap ) > 1:
        # Merge the two currently-smallest files and pay their combined size.
        merged = heapq.heappop(heap ) + heapq.heappop(heap )
        optimal_merge_cost += merged
        heapq.heappush(heap , merged )
    return optimal_merge_cost
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 31
| 0
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
    """Print every permutation of the given sequence via backtracking.
    NOTE(review): mangled -- the callee ``create_state_space_tree`` is not
    defined in this chunk (the helper below was renamed to
    ``SCREAMING_SNAKE_CASE__``); verify against the unmangled original.
    """
    create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    """Recursively print every permutation of ``sequence`` (backtracking search).

    Fix: the original declared all four parameters as ``_UpperCamelCase`` (a
    SyntaxError) while the body read ``sequence``/``index_used``/... and
    recursed through the undefined name ``create_state_space_tree``; the
    parameter names and the self-recursion are restored.

    :param sequence: items to permute
    :param current_sequence: permutation prefix built so far (mutated in place)
    :param index: recursion depth, equal to ``len(current_sequence)``
    :param index_used: per-position flags marking items already placed
    """
    if index == len(sequence ):
        # A complete permutation has been assembled -- emit it.
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            SCREAMING_SNAKE_CASE__(sequence , current_sequence , index + 1 , index_used )
            # Backtrack: undo the choice before trying the next candidate.
            current_sequence.pop()
            index_used[i] = False
# NOTE(review): mangled -- ``generate_all_permutations``, ``sequence`` and
# ``sequence_a`` are never bound under these names in this chunk (both demo
# sequences were renamed to ``__lowercase``); verify against the unmangled
# original before running.
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Model tests for FlaxAutoencoderKL.
    NOTE(review): mangled -- the first property's body reads unbound names
    (``a_``, ``batch_size``, ``image`` ...), and both accessors share the name
    ``lowercase_`` (the second shadows the first on the class). Code left
    byte-identical pending restoration.
    """
    _lowerCamelCase : Tuple = FlaxAutoencoderKL
    @property
    def lowercase_ ( self : Optional[int] ):
        """Build a dummy {sample, prng_key} input dict for the model."""
        UpperCamelCase__ = 4
        UpperCamelCase__ = 3
        UpperCamelCase__ = (32, 32)
        UpperCamelCase__ = jax.random.PRNGKey(0 )
        UpperCamelCase__ = jax.random.uniform(a_, ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def lowercase_ ( self : Dict ):
        """Return (init_dict, inputs_dict) for a small two-block autoencoder."""
        UpperCamelCase__ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        UpperCamelCase__ = self.dummy_input
        return init_dict, inputs_dict
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)  # module logger (name mangled)
# Map from model id to its config URL on the Hub.
__lowercase: str = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Configuration class for YOLOS models.
    NOTE(review): mangled -- ``__init__`` declares many parameters all named
    ``a_`` (a SyntaxError) and every attribute assignment collapsed to
    ``UpperCamelCase__ = <unbound name>``, so no attribute is actually stored
    as written. Code left byte-identical pending restoration.
    """
    _lowerCamelCase : List[str] = 'yolos'
    def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
        """Store model hyper-parameters: transformer sizes, detection-token
        settings, Hungarian matcher costs and loss coefficients."""
        super().__init__(**a_ )
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = image_size
        UpperCamelCase__ = patch_size
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = qkv_bias
        UpperCamelCase__ = num_detection_tokens
        UpperCamelCase__ = use_mid_position_embeddings
        UpperCamelCase__ = auxiliary_loss
        # Hungarian matcher
        UpperCamelCase__ = class_cost
        UpperCamelCase__ = bbox_cost
        UpperCamelCase__ = giou_cost
        # Loss coefficients
        UpperCamelCase__ = bbox_loss_coefficient
        UpperCamelCase__ = giou_loss_coefficient
        UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """ONNX export configuration for YOLOS.
    NOTE(review): mangled -- all three accessors below share the name
    ``lowercase_``; in Python the later definitions shadow the earlier ones,
    so only the last survives on the class. Presumably these were ``inputs``,
    ``atol_for_validation`` and ``default_onnx_opset``; verify upstream.
    """
    _lowerCamelCase : Union[str, Any] = version.parse('1.11')
    @property
    def lowercase_ ( self : str ):
        """Input spec: pixel_values with dynamic batch/channel/height/width axes."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def lowercase_ ( self : Tuple ):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
    @property
    def lowercase_ ( self : Optional[int] ):
        """Default ONNX opset version."""
        return 12
| 31
| 0
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()  # show INFO-level conversion progress
__lowercase: int = logging.get_logger(__name__)  # module logger (name mangled)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Tuple:
    """Build a MaskFormerConfig (Swin-tiny backbone) for the given model name.
    NOTE(review): mangled -- every local is assigned to ``UpperCamelCase__``
    while the body reads unbound names (``model_name``, ``idalabel``,
    ``config``), so the id2label wiring is broken as written. Code left
    byte-identical pending restoration.
    """
    UpperCamelCase__ = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    UpperCamelCase__ = MaskFormerConfig(backbone_config=_UpperCamelCase )
    UpperCamelCase__ = "huggingface/label-files"
    # Pick label count + id2label file per dataset the checkpoint was trained on.
    if "ade20k-full" in model_name:
        # this should be ok
        UpperCamelCase__ = 8_47
        UpperCamelCase__ = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        UpperCamelCase__ = 1_50
        UpperCamelCase__ = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        UpperCamelCase__ = 1_71
        UpperCamelCase__ = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        UpperCamelCase__ = 1_33
        UpperCamelCase__ = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        UpperCamelCase__ = 19
        UpperCamelCase__ = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        UpperCamelCase__ = 65
        UpperCamelCase__ = "mapillary-vistas-id2label.json"
    UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
    UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Tuple:
    """Build the (old_name, new_name) key-rename table for a MaskFormer+Swin checkpoint.
    NOTE(review): mangled -- the list is created as ``UpperCamelCase__ = []``
    but appended to and returned through the unbound name ``rename_keys``, and
    the parameter ``_UpperCamelCase`` is read as ``config`` in the loop
    bounds. Code left byte-identical pending restoration.
    """
    UpperCamelCase__ = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
        # patch-merging (downsample) layers exist only between the 4 stages
        if i < 3:
            rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
        rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
        rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
        rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
        rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
        rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
        rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
        rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
        # cross-attention out projection
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
        # MLP 1
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
        # MLP 2
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
        # layernorm 3 (final layernorm)
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
    for i in range(3 ):
        rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
        rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
    # fmt: on
    return rename_keys
def rename_key(dct, src, dest):
    """Move the value stored under key ``src`` in ``dct`` to key ``dest`` (in place).

    Used while converting checkpoints: ``dct`` is the state dict, ``src`` the
    original parameter name and ``dest`` the HF parameter name (see the
    ``rename_keys`` pairs built above and applied at the call site).
    """
    val = dct.pop(src)
    dct[dest] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin ``attn.qkv`` weight/bias in ``state_dict`` into
    separate query/key/value entries (in place).

    Args:
        state_dict: checkpoint state dict, modified in place.
        backbone_config: Swin config providing ``embed_dim`` and ``depths``.
    """
    # feature dimension doubles at every Swin stage
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            # NOTE(review): the obfuscated original lost the assignment targets; these HF
            # MaskFormer/Swin key names are reconstructed — confirm against the model's named parameters.
            prefix = f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self"
            state_dict[f"{prefix}.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"{prefix}.query.bias"] = in_proj_bias[: dim]
            state_dict[f"{prefix}.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"{prefix}.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"{prefix}.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"{prefix}.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split the fused DETR-style decoder attention projections in ``state_dict``
    into separate q/k/v projection entries (in place).

    Handles both the self-attention (``self_attn``) and cross-attention
    (``multihead_attn`` -> ``encoder_attn``) blocks of every decoder layer.
    """
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): target key names reconstructed (assignment targets were destroyed) — confirm
        # against MaskFormer's named parameters; original sliced the bias with `config.hidden_size`,
        # normalized here to the local `hidden_size` for consistency with the weight slice.
        self_prefix = f"model.transformer_module.decoder.layers.{idx}.self_attn"
        state_dict[f"{self_prefix}.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"{self_prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"{self_prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"{self_prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"{self_prefix}.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"{self_prefix}.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        cross_prefix = f"model.transformer_module.decoder.layers.{idx}.encoder_attn"
        state_dict[f"{cross_prefix}.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"{cross_prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"{cross_prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"{cross_prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"{cross_prefix}.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"{cross_prefix}.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    """Download and return the standard COCO "cats" test image used to verify conversions.

    Returns a ``PIL.Image.Image`` (the original annotated ``torch.Tensor``, which was wrong).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """
    Copy/paste/tweak the original MaskFormer checkpoint weights into the HF MaskFormer
    structure, verify the outputs on a test image, and optionally save / push the result.
    """
    config = get_maskformer_config(model_name)
    # load original state_dict (a Detectron2-style pickle with the weights under "model")
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors (values only; mutating existing keys while iterating is safe)
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # only the final backbone layernorm is expected to be missing from the original checkpoint
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # Command-line entry point for the MaskFormer conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        # help must be a plain string (the original passed a one-element tuple)
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
# Module-level logger; the rest of this module refers to it as `logger`,
# so it must be bound under that name (the obfuscated original bound `__lowercase`).
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load PyTorch checkpoints into a Flax state dict.

    Args:
        flax_model: the Flax model whose parameter structure drives the conversion.
        pytorch_checkpoint_path: path to a single ``.pt`` file, or (when sharded)
            the list of shard files.
        is_sharded: whether the checkpoint is split across several shard files.
        allow_missing_keys: kept for interface compatibility; not used here.

    Raises:
        ImportError: if PyTorch is not installed.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")
        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
):
    """Rename a PyTorch parameter key (tuple form) to its Flax equivalent,
    transposing/reshaping the tensor where the two layouts differ.

    Returns:
        The (possibly renamed) key tuple and the (possibly reshaped) tensor.
    """

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Check whether ``key`` (with or without ``model_prefix``) exists in the Flax params."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PyTorch (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
    '''Convert an (unsharded) PyTorch state dict into a nested Flax params dict.

    NOTE(review): an automated renaming pass destroyed the assignment targets in this
    function (every local is written as `UpperCamelCase__` and the parameters repeat
    `_UpperCamelCase`), so it does not run as-is. The right-hand sides still show the
    intended names: pt_state_dict, model_prefix, flax_model_params,
    random_flax_state_dict, flax_state_dict, pt_tuple_key, flax_key/flax_tensor, etc.
    '''
    # convert all tensors to numpy up front
    UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
    UpperCamelCase__ = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        UpperCamelCase__ = flax_model.params["params"]
    else:
        UpperCamelCase__ = flax_model.params
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(_UpperCamelCase )
    UpperCamelCase__ = {}
    # head checkpoint -> base model: PT keys carry the prefix the Flax params lack
    UpperCamelCase__ = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # base checkpoint -> model with head: Flax params carry a prefix the PT keys lack
    UpperCamelCase__ = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCamelCase__ = tuple(pt_key.split("." ) )
        # remove base model prefix if necessary
        UpperCamelCase__ = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = pt_tuple_key[1:]
        # Correctly rename weight parameters
        UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        # add model prefix if necessary
        UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            # shape mismatch against the randomly-initialised Flax params is fatal
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
                continue
            # also add unexpected weight so that warning is thrown
            UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
        else:
            # also add unexpected weight so that warning is thrown
            UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
    return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
    '''Convert a sharded PyTorch checkpoint (list of shard files) into a Flax params dict.

    Per-shard variant of the unsharded converter above: each shard is loaded with
    ``torch.load`` and folded into one flat Flax state dict, which is un-flattened
    at the end.

    NOTE(review): as elsewhere in this module, the obfuscation destroyed the
    assignment targets (`UpperCamelCase__`, duplicated `_UpperCamelCase` params),
    so the function does not run as-is; the intended locals are visible on the
    right-hand sides (shard_filenames, pt_state_dict, flax_state_dict, ...).
    '''
    import torch
    # Load the index
    UpperCamelCase__ = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        UpperCamelCase__ = torch.load(_UpperCamelCase )
        UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
        UpperCamelCase__ = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            UpperCamelCase__ = flax_model.params["params"]
            UpperCamelCase__ = flatten_dict(_UpperCamelCase )
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
        else:
            UpperCamelCase__ = flax_model.params
            UpperCamelCase__ = flatten_dict(_UpperCamelCase )
        # head checkpoint -> base model / base checkpoint -> head model detection
        UpperCamelCase__ = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        UpperCamelCase__ = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            UpperCamelCase__ = tuple(pt_key.split("." ) )
            # remove base model prefix if necessary
            UpperCamelCase__ = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                UpperCamelCase__ = pt_tuple_key[1:]
            # Correctly rename weight parameters
            UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            # add model prefix if necessary
            UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                UpperCamelCase__ = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                    continue
                if "var" in flax_key[-1]:
                    UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
                    continue
                # also add unexpected weight so that warning is thrown
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
            else:
                # also add unexpected weight so that warning is thrown
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
    return unflatten_dict(_UpperCamelCase )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint file into a PyTorch model.

    Args:
        model: the PyTorch model instance to load weights into.
        flax_checkpoint_path: path to the msgpack-serialized Flax checkpoint.

    Raises:
        EnvironmentError: if the file cannot be deserialized as a Flax object.
    """
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
    # import correct flax class (mirrors the PyTorch class name with a "Flax" prefix)
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
    '''Load a (possibly nested) Flax state dict into a PyTorch model, returning the model.

    Casts bf16 Flax weights to fp32, renames Flax keys to PyTorch conventions
    (kernel->weight with transpose, scale->weight, batch-stats handling,
    weight-norm parametrizations), then calls ``pt_model.load_state_dict`` and
    logs missing/unexpected keys.

    NOTE(review): obfuscation destroyed the assignment targets here as well
    (`UpperCamelCase__`, lambda parameter vs `x` mismatch at the bf16 check,
    duplicated `_UpperCamelCase` params) — this body does not run as-is; intended
    locals are visible on the right-hand sides.
    '''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    # NOTE(review): lambda binds `_UpperCamelCase` but the body reads `x` — broken by obfuscation.
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
    if any(_UpperCamelCase ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    UpperCamelCase__ = pt_model.state_dict()
    # detect base-model <-> model-with-head prefix mismatches (same logic as the PT->Flax direction)
    UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    UpperCamelCase__ = []
    UpperCamelCase__ = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # conv layer: Flax (kh, kw, in, out) -> PyTorch (out, in, kh, kw)
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # linear layer: Flax (in, out) -> PyTorch (out, in)
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            UpperCamelCase__ = ".".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            UpperCamelCase__ = ".".join(_UpperCamelCase )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCamelCase__ = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCamelCase__ = key.split("." )
            UpperCamelCase__ = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCamelCase__ = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCamelCase__ = key_components[-2] + "_v"
            if name is not None:
                UpperCamelCase__ = key_components[:-3] + [name]
                UpperCamelCase__ = ".".join(_UpperCamelCase )
                UpperCamelCase__ = key
        if flax_key in special_pt_names:
            UpperCamelCase__ = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            # shape mismatch against the PyTorch model is fatal
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
                UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
                # remove from missing keys
                missing_keys.remove(_UpperCamelCase )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(_UpperCamelCase )
    pt_model.load_state_dict(_UpperCamelCase )
    # re-transform missing_keys to list
    UpperCamelCase__ = list(_UpperCamelCase )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference." )
    else:
        logger.warning(
            F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            "If your task is similar to the task the model of the checkpoint was trained on, "
            F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 31
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
# Root logger; the module below attaches a stdout StreamHandler to it under the
# name `logger` (the obfuscated original bound `__lowercase`, leaving `logger` undefined).
logger = logging.getLogger()
def get_results(output_dir):
    """Read and return the ``all_results.json`` metrics dict from ``output_dir``.

    Raises:
        ValueError: if ``all_results.json`` does not exist in ``output_dir``.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
# Mirror logger output to stdout so the test runner captures it
# (the obfuscated original bound the handler to `__lowercase`, leaving
# `stream_handler` undefined on the next line).
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    # End-to-end TPU tests: launch example scripts through ``xla_spawn`` and check results.
    # NOTE(review): obfuscation broke this class — both methods share the name
    # ``lowercase_`` (the second shadows the first), locals were collapsed to
    # ``UpperCamelCase__``, and ``patch.object(a_, "argv", a_)`` references an
    # undefined ``a_`` (presumably ``sys`` and the argv list) — confirm against
    # the original test file before relying on it.
    def lowercase_ ( self : Tuple ):
        """Run run_glue.py on 8 TPU cores; check eval accuracy >= 0.75 and < 500s wall time."""
        import xla_spawn
        UpperCamelCase__ = self.get_auto_remove_tmp_dir()
        # NOTE(review): the f-string interpolates ``tmp_dir``, which the obfuscation left unbound.
        UpperCamelCase__ = f'\n    ./examples/pytorch/text-classification/run_glue.py\n    --num_cores=8\n    ./examples/pytorch/text-classification/run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --do_train\n    --do_eval\n    --debug tpu_metrics_debug\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --max_steps=10\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    '.split()
        with patch.object(a_, "argv", a_ ):
            UpperCamelCase__ = time()
            xla_spawn.main()
            UpperCamelCase__ = time()
            UpperCamelCase__ = get_results(a_ )
            self.assertGreaterEqual(result["eval_accuracy"], 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500 )
    def lowercase_ ( self : Optional[Any] ):
        """Run the TPU trainer smoke test (tests/test_trainer_tpu.py) on 8 cores."""
        import xla_spawn
        UpperCamelCase__ = "\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    ".split()
        with patch.object(a_, "argv", a_ ):
            xla_spawn.main()
| 367
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the card number begins with one of the accepted issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Return True if ``credit_card_number`` (a digit string) passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate a credit card number: digits only, length 13-16, valid issuer
    prefix, and passing the Luhn check. Prints a diagnostic and returns a bool.
    """
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo runs: a valid Visa number and a number that fails the length check.
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (transformers.utils logging); bound under the conventional
# name `logger` — the obfuscated original bound `__lowercase` instead.
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (old, new) key pairs mapping original BEiT/DiT checkpoint
    parameter names to HF ``transformers`` BEiT names.

    Args:
        config: BEiT config providing ``num_hidden_layers``.
        has_lm_head: masked-image-modeling checkpoint (adds mask token + final layernorm).
        is_semantic: semantic-segmentation checkpoint (keys live under a "backbone." prefix).
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused BEiT ``attn.qkv`` weight (and separate q/v biases — BEiT
    has no key bias) into individual query/key/value entries, and move the
    layer-scale parameters ``gamma_1``/``gamma_2`` to ``lambda_1``/``lambda_2``
    (renamed because ``gamma`` would be remapped by ``from_pretrained``).
    Modifies ``state_dict`` in place.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        # NOTE(review): target key names reconstructed from the HF BEiT architecture
        # (obfuscation destroyed the assignment targets) — confirm against the model.
        attn = f"beit.encoder.layer.{i}.attention.attention"
        state_dict[f"{attn}.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{attn}.query.bias"] = q_bias
        state_dict[f"{attn}.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{attn}.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{attn}.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` in ``dct`` to key ``new`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO "cats" test image (a ``PIL.Image.Image``)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''
    Download a DiT checkpoint from `checkpoint_url`, convert it to the
    HuggingFace BEiT layout, sanity-check the logits shape on a sample image,
    save model + image processor to `pytorch_dump_folder_path`, and optionally
    push both to the Hub.

    Fix: the three parameters all shared the name `_UpperCamelCase` (a
    SyntaxError) and every intermediate was collapsed into one throwaway
    local; the body already referenced `checkpoint_url` and
    `pytorch_dump_folder_path`, so those names are restored here.
    '''
    # Self-supervised DiT checkpoints keep the masked-image-modeling head;
    # "rvlcdip" checkpoints are document-classification fine-tunes.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels (RVL-CDIP has 16 document classes)
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits: 16 classes for rvlcdip, else (196 patches x 8192 codebook)
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
    # Command-line entry point for the DiT -> HuggingFace BEiT conversion.
    # NOTE(review): renaming damage — the parser is bound to `__lowercase` but
    # used as `parser`, the parsed namespace is bound to `__lowercase` but used
    # as `args`, and `convert_dit_checkpoint` is not defined under that name in
    # this file. Verify against the original script before running.
    __lowercase: Optional[int] = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    __lowercase: int = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
    '''
    Solve the semiconductor mass-action law n * p = n_i^2 for whichever of the
    three concentrations is passed as 0 (the unknown).

    Exactly one argument must be 0; returns ("name_of_unknown", value).
    Raises ValueError when not exactly one value is 0 or any value is negative.

    Fix: the three parameters previously all shared the name `_UpperCamelCase`
    (a SyntaxError), which also left `electron_conc` / `hole_conc` /
    `intrinsic_conc` referenced in the body undefined.
    '''
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError("You cannot supply more or less than 2 values" )
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor" )
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor" )
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor" )
    elif electron_conc == 0:
        # n = n_i^2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i^2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: exactly one argument is 0 once validation passes.
        return (-1, -1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( PretrainedConfig):
    '''
    Configuration for a BertGeneration model; stores the hyper-parameters the
    model class reads.

    Fixes over the previous revision: the `__init__` parameters all shared the
    name `a_` (duplicate argument names are a SyntaxError), every value was
    dropped into a throwaway local instead of being set on `self`, the
    model-type class attribute was misnamed, and the base class referenced an
    undefined symbol instead of the `PretrainedConfig` imported above.
    '''
    # Identifier the auto-classes use to map config -> model.
    model_type = 'bert-generation'

    def __init__( self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        """Store the hyper-parameters; special token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps submodule name -> public names it exports.
# Fix: the structure was previously bound to `__lowercase` (and the torch-only
# model list to a second, unrelated `__lowercase`), while `_LazyModule` below
# was handed the then-undefined name `_import_structure`.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    # Model classes require torch; register them only when it is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports on access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
from __future__ import annotations
import math
from collections.abc import Callable
def SCREAMING_SNAKE_CASE__( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    '''
    Approximate the arc length of y = fnc(x) over [x_start, x_end] by summing
    the lengths of `steps` straight-line segments.

    Fix: the four parameters previously all shared one name (a SyntaxError)
    and both running endpoints were collapsed into a single local, so the
    segment endpoints were never tracked.
    '''
    prev_x = x_start
    prev_fx = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        next_x = (x_end - x_start) / steps + prev_x
        next_fx = fnc(next_x )
        length += math.hypot(next_x - prev_x , next_fx - prev_fx )
        # Increment step
        prev_x = next_x
        prev_fx = next_fx
    return length
if __name__ == "__main__":
    # Demo: arc length of a rapidly oscillating sine at increasing resolutions.
    # Fixes: the helper's parameter was named `_UpperCamelCase` while the body
    # used `x`, the loop counter was bound to `__lowercase` while the loop
    # tested `i`, and the helper is named `f` to match the call below.
    # NOTE(review): `line_length` is the arc-length routine defined above under
    # a mangled name — restore that binding for this demo to run.

    def f(x: float) -> float:
        """Test curve: sin(10x), a hard case for coarse segment counts."""
        return math.sin(10 * x )

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
        i *= 10
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( v , l , r , key ) -> int:  # noqa: E741
    '''
    Binary search: return the smallest index in (l, r] with v[index] >= key,
    assuming v[l+1..r] is sorted ascending and v[r] >= key.

    Fix: the four parameters previously all shared one name (a SyntaxError)
    and both comparison branches wrote to the same local, losing the l/r
    bracket update.
    '''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def SCREAMING_SNAKE_CASE__( v: list[int] ) -> int:
    '''
    Length of the longest strictly increasing subsequence of `v`, via the
    O(n log n) tails technique: tail[k] holds the smallest possible tail value
    of an increasing subsequence of length k+1.

    Fixes: `v` and `tail` were referenced but never bound (every assignment
    went to a throwaway local), the middle case never updated `tail`, and the
    binary-search helper was unreachable under its mangled name — it is
    inlined here as a private helper so the block is self-contained.
    '''

    def _ceil_index(tail: list[int] , left: int , right: int , key: int ) -> int:
        # Smallest index in (left, right] with tail[index] >= key.
        while right - left > 1:
            middle = (left + right) // 2
            if tail[middle] >= key:
                right = middle
            else:
                left = middle
        return right

    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # New smallest value: best possible start of a length-1 subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens the tail of some intermediate length.
            tail[_ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__lowercase: Union[str, Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__lowercase: Any = typing.Union[np.floataa, int, float] # noqa: UP007
def SCREAMING_SNAKE_CASE__( vector_a: Vector , vector_b: Vector ) -> VectorOut:
    '''
    Euclidean (L2) distance between two equal-length vectors, via NumPy.

    Fix: both parameters were previously named `_UpperCamelCase`, which is a
    SyntaxError (duplicate argument names).
    '''
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def SCREAMING_SNAKE_CASE__( vector_a: Vector , vector_b: Vector ) -> VectorOut:
    '''
    Pure-python Euclidean (L2) distance between two equal-length vectors.

    Fixes: duplicate parameter names (a SyntaxError) and a degenerate summand
    — `(va - va) ** 2` subtracted each element from itself, so the function
    always returned 0; elements must be paired across the two vectors.
    '''
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Time the pure-python and NumPy distance implementations against each other."""
        from timeit import timeit

        # NOTE(review): the timed statements call `euclidean_distance` /
        # `euclidean_distance_no_np`, but the functions above were left under
        # a mangled shared name — verify the bindings before trusting numbers.
        print("Without Numpy" )
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
        print("With Numpy" )
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )

    benchmark()
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject):
    '''
    Placeholder surfaced when `torch`/`scipy` are missing: any attempt to
    construct or load it raises an informative ImportError via
    `requires_backends`.

    Fixes: the metaclass referenced an undefined symbol instead of the
    imported `DummyObject`, the backend list must live in `_backends` for
    that machinery to find it, and the two classmethods shared the name
    `lowercase_` (the second silently shadowed the first) — restored to the
    standard `from_config` / `from_pretrained` hooks.
    '''
    _backends = ['torch', 'scipy']

    def __init__( self, *args, **kwargs ):
        """Refuse construction unless torch and scipy are installed."""
        requires_backends(self, ["torch", "scipy"] )

    @classmethod
    def from_config( cls, *args, **kwargs ):
        """Refuse config-based instantiation without the backends."""
        requires_backends(cls, ["torch", "scipy"] )

    @classmethod
    def from_pretrained( cls, *args, **kwargs ):
        """Refuse checkpoint loading without the backends."""
        requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure: maps submodule name -> public names it exports.
# Fix: every structure update was previously bound to a fresh `__lowercase`
# variable, so the vision/torch/TF entries were lost and `_LazyModule` below
# was handed the then-undefined name `_import_structure`.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    # Image processing requires the vision backend (PIL).
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports on access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( PretrainedConfig):
    '''
    Configuration for a GPT-NeoX Japanese model; stores the hyper-parameters
    the model class reads.

    Fixes: the `__init__` parameters all shared the name `a_` (duplicate
    argument names are a SyntaxError), every value was dropped into a
    throwaway local instead of being set on `self`, the model-type class
    attribute was misnamed, and the base class referenced an undefined symbol
    instead of the `PretrainedConfig` imported above.
    '''
    # Identifier the auto-classes use to map config -> model.
    model_type = 'gpt_neox_japanese'

    def __init__( self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        """Store the hyper-parameters; special token ids go to the base class."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 31
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase: Any = logging.get_logger(__name__)
__lowercase: List[Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class UpperCAmelCase ( BackboneConfigMixin , PretrainedConfig):
    '''
    Configuration for a ResNet backbone/model.

    Fixes: the `__init__` parameters all shared the name `a_` (a SyntaxError),
    values were dropped into a throwaway local instead of being set on `self`,
    both class attributes shared the name `_lowerCamelCase` (so the body's
    `self.layer_types` lookup could never resolve), and the base classes were
    the same undefined symbol twice (duplicate bases are a TypeError) instead
    of the `BackboneConfigMixin` / `PretrainedConfig` imported above.
    '''
    # Identifier the auto-classes use to map config -> model.
    model_type = 'resnet'
    # Residual block variants accepted by `layer_type`.
    layer_types = ['basic', 'bottleneck']

    def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        """Validate `layer_type` and store the hyper-parameters.

        The list defaults are read-only here, so sharing them across calls is
        safe despite being mutable defaults.
        """
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # One named stage per depth entry, plus the stem.
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class UpperCAmelCase ( OnnxConfig):
    '''
    ONNX export configuration for ResNet.

    Fixes: both properties shared the name `lowercase_` (the second silently
    shadowed the first) and the version attribute was misnamed — the ONNX
    exporter looks these up as `inputs`, `atol_for_validation` and
    `torch_onnx_minimum_version`. The base class referenced an undefined
    symbol instead of the `OnnxConfig` imported above.
    '''
    # Minimum torch version whose ONNX export supports this model.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs( self ):
        """Input name -> dynamic-axis mapping used for export."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ):
        """Absolute tolerance when validating exported model outputs."""
        return 1e-3
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density: float , bulk_modulus: float ) -> float:
    '''
    Speed of sound in a fluid: c = sqrt(K / rho), with bulk modulus K (Pa)
    and density rho (kg/m^3).

    Raises ValueError for non-positive density or bulk modulus.

    Fix: both parameters previously shared the name `_UpperCamelCase` (a
    SyntaxError), leaving `density` and `bulk_modulus` undefined in the body.
    '''
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    '''
    Fast CPU tests for the Kandinsky img2img pipeline, built from tiny
    randomly-initialised components.

    NOTE(review): mechanical renaming damage throughout — every property and
    method below is named `lowercase_` (each definition shadows the previous
    one), so references such as `self.time_input_dim`,
    `self.cross_attention_dim`, `self.dummy_text_encoder`,
    `self.get_dummy_components` and `self.get_dummy_inputs` no longer
    resolve; most method-local results are bound to the throwaway name
    `UpperCamelCase__` while later lines read the intended names; and one
    method declares two parameters both named `a_`, which is a SyntaxError.
    Code is kept byte-identical here — restore the original member names
    before running.
    '''

    # Pipeline under test and the argument lists the shared tester mixin checks.
    _lowerCamelCase : Dict = KandinskyImgaImgPipeline
    _lowerCamelCase : Tuple = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    _lowerCamelCase : Optional[int] = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    _lowerCamelCase : Any = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _lowerCamelCase : Any = False

    @property
    def lowercase_ ( self : List[str] ):
        """Returns 32 — hidden size of the tiny test text encoder (mangled name; see class note)."""
        return 32

    @property
    def lowercase_ ( self : List[Any] ):
        """Returns 32 — time-embedding input dim for the tiny UNet (mangled name; see class note)."""
        return 32

    @property
    def lowercase_ ( self : Union[str, Any] ):
        """Base block output channels; reuses the time-embedding input dim."""
        return self.time_input_dim

    @property
    def lowercase_ ( self : Any ):
        """Time-embedding projection dim: 4x the input dim."""
        return self.time_input_dim * 4

    @property
    def lowercase_ ( self : Union[str, Any] ):
        """Returns 100 — cross-attention width shared by text encoder and UNet."""
        return 100

    @property
    def lowercase_ ( self : Tuple ):
        """Tiny multilingual tokenizer downloaded from the Hub."""
        UpperCamelCase__ = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer

    @property
    def lowercase_ ( self : Union[str, Any] ):
        """Deterministically-seeded tiny MultilingualCLIP text encoder in eval mode."""
        torch.manual_seed(0 )
        UpperCamelCase__ = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        UpperCamelCase__ = MultilingualCLIP(a_ )
        UpperCamelCase__ = text_encoder.eval()
        return text_encoder

    @property
    def lowercase_ ( self : List[str] ):
        """Deterministically-seeded tiny text/image-conditioned UNet."""
        torch.manual_seed(0 )
        UpperCamelCase__ = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase__ = UNetaDConditionModel(**a_ )
        return model

    @property
    def lowercase_ ( self : Optional[Any] ):
        """Constructor kwargs for the tiny VQ decoder (movq)."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def lowercase_ ( self : List[Any] ):
        """Deterministically-seeded tiny VQ model."""
        torch.manual_seed(0 )
        UpperCamelCase__ = VQModel(**self.dummy_movq_kwargs )
        return model

    def lowercase_ ( self : int ):
        """Assemble the full component dict (text encoder, tokenizer, UNet, DDIM scheduler, movq) for the pipeline."""
        UpperCamelCase__ = self.dummy_text_encoder
        UpperCamelCase__ = self.dummy_tokenizer
        UpperCamelCase__ = self.dummy_unet
        UpperCamelCase__ = self.dummy_movq
        UpperCamelCase__ = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00_085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        UpperCamelCase__ = DDIMScheduler(**a_ )
        UpperCamelCase__ = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def lowercase_ ( self : Any, a_ : Optional[Any], a_ : int=0 ):
        """Build deterministic call kwargs (prompt, init image, embeds, generator) for one run.

        NOTE(review): both parameters are named `a_` — duplicate argument
        names are a SyntaxError; presumably (device, seed) originally.
        """
        UpperCamelCase__ = floats_tensor((1, self.cross_attention_dim), rng=random.Random(a_ ) ).to(a_ )
        UpperCamelCase__ = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(a_ )
        # create init_image
        UpperCamelCase__ = floats_tensor((1, 3, 64, 64), rng=random.Random(a_ ) ).to(a_ )
        UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 )[0]
        UpperCamelCase__ = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ).resize((256, 256) )
        if str(a_ ).startswith("mps" ):
            UpperCamelCase__ = torch.manual_seed(a_ )
        else:
            UpperCamelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
        UpperCamelCase__ = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def lowercase_ ( self : List[Any] ):
        """Smoke test on CPU: the pipeline runs, tuple and dataclass outputs agree, and a fixed output slice matches recorded values."""
        UpperCamelCase__ = "cpu"
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = self.pipeline_class(**a_ )
        UpperCamelCase__ = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        UpperCamelCase__ = pipe(**self.get_dummy_inputs(a_ ) )
        UpperCamelCase__ = output.images
        UpperCamelCase__ = pipe(
            **self.get_dummy_inputs(a_ ), return_dict=a_, )[0]
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
    '''
    Slow GPU integration test: runs the real Kandinsky 2.1 prior + img2img
    pipelines end-to-end and compares the result with a stored reference.

    NOTE(review): renaming damage — both methods are named `lowercase_`
    (the second shadows the first) and most locals are bound to the throwaway
    name `UpperCamelCase__` while later lines read the intended names.
    '''

    def lowercase_ ( self : Dict ):
        """Free GPU memory between tests.

        NOTE(review): written like a tearDown hook (it calls
        super().tearDown()), but under the mangled name unittest will never
        invoke it — and the next method shadows it anyway.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self : Dict ):
        """Full img2img run: 'A red cartoon frog, 4k' over the cat photo must match the stored reference within mean-pixel tolerance."""
        UpperCamelCase__ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        UpperCamelCase__ = "A red cartoon frog, 4k"
        UpperCamelCase__ = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.floataa )
        pipe_prior.to(a_ )
        UpperCamelCase__ = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.floataa )
        UpperCamelCase__ = pipeline.to(a_ )
        pipeline.set_progress_bar_config(disable=a_ )
        UpperCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCamelCase__ , UpperCamelCase__ = pipe_prior(
            a_, generator=a_, num_inference_steps=5, negative_prompt="", ).to_tuple()
        UpperCamelCase__ = pipeline(
            a_, image=a_, image_embeds=a_, negative_image_embeds=a_, generator=a_, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        UpperCamelCase__ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(a_, a_ )
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( nth_term: int | float | str , power: int | float | str ) -> list[str]:
    '''
    Return the first `nth_term` terms of the p-series 1 + 1/2^p + 1/3^p + ...
    as display strings ("1", "1 / 4", ...). An empty-string `nth_term`
    yields [""] to mirror raw `input()` usage.

    Fix: both parameters were previously named `_UpperCamelCase` (duplicate
    argument names are a SyntaxError), leaving `nth_term` referenced in the
    body undefined.
    '''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        # First element is plain "1"; later ones show 1 / (k+1)^power.
        series.append(F'1 / {pow(temp + 1 , int(power ) )}' if series else "1" )
    return series
if __name__ == "__main__":
    # Run doctests, then prompt the user interactively for a P-series demo.
    import doctest
    doctest.testmod()
    # NOTE(review): renaming damage — both inputs are bound to `__lowercase`
    # while the final call reads `nth_term` / `power`, and `p_series` is not
    # defined under that name in this file. Verify before running.
    __lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
    __lowercase: Optional[int] = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def SCREAMING_SNAKE_CASE__( ) -> Any:
    '''
    Regression test for Prim's MST on a fixed 9-node weighted graph: every
    expected tree edge must appear in the algorithm's output, in either
    direction (the graph is undirected).

    Fix: every intermediate result was previously bound to one throwaway
    local, so `edges`, `adjancency`, `result` and `expected` — all read
    later — were undefined, and the unpacking loop reused one target name.
    '''
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list )
    for node_u, node_v, cost in edges:
        # Undirected graph: record each edge in both directions.
        adjancency[node_u].append([node_v, cost] )
        adjancency[node_v].append([node_u, cost] )
    result = mst(adjancency )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase ( TaskTemplate):
    '''
    Task template describing an audio-classification dataset: an "audio"
    input column and a ClassLabel "labels" column.

    Fixes: every class attribute was previously named `_lowerCamelCase`
    (each shadowing the last), both members were named `lowercase_`,
    `@dataclass(frozen=...)` and the `isinstance` check referenced undefined
    symbols instead of True / `ClassLabel`, and the base class referenced an
    undefined symbol instead of the imported `TaskTemplate`.
    '''
    # `task` participates in asdict() even at its default so templates
    # round-trip through serialization.
    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features( self, features ):
        """Return a copy of this template whose label schema uses the dataset's own ClassLabel feature."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ):
        """Map dataset column names to the canonical task column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 31
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject):
    '''
    Placeholder surfaced when the `speech` backend is not installed:
    constructing it raises an informative ImportError via `requires_backends`.

    Fix: the metaclass referenced an undefined symbol instead of the imported
    `DummyObject`, and the backend list must be named `_backends` for that
    machinery to find it.
    '''
    _backends = ['speech']

    def __init__( self, *args, **kwargs ):
        """Refuse construction unless the speech backend is installed."""
        requires_backends(self, ["speech"] )
class UpperCAmelCase ( metaclass=DummyObject):
    '''
    Second placeholder class for the `speech` backend (the original public
    name was lost to renaming; at module level it shadows the class above).
    Constructing it raises an informative ImportError via `requires_backends`.

    Fix: the metaclass referenced an undefined symbol instead of the imported
    `DummyObject`, and the backend list must be named `_backends`.
    '''
    _backends = ['speech']

    def __init__( self, *args, **kwargs ):
        """Refuse construction unless the speech backend is installed."""
        requires_backends(self, ["speech"] )
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( shape , scale=1.0 , rng=None , name=None ):
    '''
    Create a shape[0] x shape[1] nested list of random floats in [0, scale),
    drawn from `rng` (defaulting to the module-level generator).

    Fix: the four parameters previously all shared the name `_UpperCamelCase`
    (a SyntaxError), which also left `rng` undefined in the body. `name` is
    unused but kept for call-site compatibility.
    '''
    if rng is None:
        # NOTE(review): the module-level generator was left bound to a mangled
        # name (`__lowercase`) above; verify that `global_rng` resolves.
        rng = global_rng
    values = []
    for _batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
    '''
    Helper holding the hyper-parameters and input builders shared by the
    Speech2Text feature-extraction tests.

    Fixes: the `__init__` and `prepare_inputs_for_common` parameter lists
    each reused the name `a_` for every argument (duplicate argument names
    are a SyntaxError), the hyper-parameters were dropped into a throwaway
    local instead of being set on `self`, and both methods shared the name
    `lowercase_` — restored to the names the tests below actually call.
    NOTE(review): the test class instantiates this helper as
    `SpeechaTextFeatureExtractionTester`; the class name here was mangled to
    `UpperCAmelCase` — verify the binding.
    '''

    def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        """Record the test hyper-parameters on the instance."""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive (ragged) sequence lengths in a batch.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        """Keyword arguments for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self, equal_length=False, numpify=False ):
        """Build a batch of raw speech inputs (equal-length or increasing), optionally as numpy arrays."""

        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tests for the Speech2Text feature extractor (fbank features + per-utterance CMVN).

    NOTE(review): the base mixin name is machine-mangled (`SCREAMING_SNAKE_CASE__`);
    upstream this is `SequenceFeatureExtractionTestMixin` -- confirm before relying
    on its inherited tests.
    """

    # Class under test; None when speech deps are missing so the mixin skips.
    # (Was mangled to `_lowerCamelCase`; methods below read `self.feature_extraction_class`.)
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        """Build the helper that supplies feat-extract kwargs and dummy inputs."""
        # Must be named `setUp` so unittest runs it; methods below read this attribute.
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert per-feature mean ~ 0 and variance ~ 1 (i.e. CMVN was applied)."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        """Lists and numpy arrays, batched and unbatched, produce identical features."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        """CMVN holds on the valid (unpadded) region for every padding strategy."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        """Same as above with numpy output; the padded tail must stay ~ zero."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            # BUGFIX: was `input_features[0]` -- the second padded-tail check must
            # look at sample 1, matching the CMVN check on the line above.
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        """Truncation to max_length keeps CMVN valid on every (now full) sample."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        """padding='longest' with truncation pads/truncates to min(longest, max_length)."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        """float64 inputs are downcast to float32 by `pad`, for both np and pt tensors."""
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Return the first `num_samples` LibriSpeech dummy waveforms (sorted by id)."""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        """Regression test against known fbank values for one LibriSpeech sample."""
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # `:30` clamps to the 24-dim feature axis, matching the 24 expected values.
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
| 31
| 0
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
# Quiet down transformers logging for the conversion run.
logging.set_verbosity_warning()
# NOTE(review): name is machine-mangled -- this module constant is the JSON
# indent level (upstream `json_indent = 2`) used when dumping vocab/config
# files below; confirm against the original script.
__lowercase: Optional[Any] = 2
class Dictionary:
    """Minimal fairseq ``Dictionary`` (symbol <-> index mapping).

    Restored from the mangled version: the constructor previously declared five
    parameters all named ``a_`` (a SyntaxError) and never initialized the
    ``symbols``/``count``/``indices`` attributes the methods rely on; method
    names are restored to match their call sites (``add_symbol``,
    ``add_from_file``, ``_load_meta``, ``Dictionary.load``).
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # index -> symbol
        self.count = []  # index -> occurrence count
        self.indices = {}  # symbol -> index
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range indices map to the unknown word rather than raising.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a fairseq ``dict.txt`` (path or file object)."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add ``word`` with count ``n``; return its index (existing or new)."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # fairseq dictionaries carry no meta header; subclasses may override
        # and return the number of header lines to skip.
        return 0

    def add_from_file(self, f):
        """Load entries (``<symbol> <count> [flags]`` per line) from a path or file."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d: dict) -> dict:
    """Convert fairseq BPE vocab keys to HF convention.

    (1) remove the "@@" word-continuation marker, (2) append "</w>" to tokens
    that end a word, and (3) restore the four special tokens untouched.
    Name restored to match its call site in the conversion function.
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint dump into a Hugging Face model directory.

    Writes vocab.json, merges.txt, config.json, tokenizer_config.json and the
    PyTorch weights into ``pytorch_dump_folder_path``. Name and locals restored
    from the mangled version to match the CLI call site.

    Raises:
        ValueError: if the checkpoint dir, checkpoint.pt, dict.txt or bpecodes
            files are missing.
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        # indent=2 matches the module-level json indent constant
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    # fairseq keys -> HF keys: output projection keeps its name, everything else
    # is nested under the "biogpt." prefix.
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # CLI wrapper for the conversion function.
    # NOTE(review): local names are machine-mangled -- the parser object is
    # bound to `__lowercase` but used below as `parser`, and the parsed
    # namespace is bound to `__lowercase` but used as `args`; confirm against
    # the original script before running.
    __lowercase: Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __lowercase: Union[str, Any] = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single column (size x 1).

    Name restored to match the call sites in the covariance helpers.
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class scatter matrix, averaged over all samples.

    Args:
        features: (n_features, n_samples) data matrix.
        labels: (n_samples,) integer label per column of `features`.
        classes: number of classes; labels are assumed to be 0..classes-1.

    Returns:
        (n_features, n_features) within-class covariance matrix.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class scatter matrix: spread of class means around the global mean.

    Args:
        features: (n_features, n_samples) data matrix.
        labels: (n_samples,) integer label per column of `features`.
        classes: number of classes; labels are assumed to be 0..classes-1.

    Returns:
        (n_features, n_features) between-class covariance matrix, with each
        class-mean deviation weighted by the class sample count.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project `features` onto its `dimensions` leading principal components.

    Args:
        features: (n_features, n_samples) data matrix (samples are columns).
        dimensions: number of principal components to keep.

    Returns:
        (dimensions, n_samples) projected data.

    Raises:
        AssertionError: if `features` is empty / all zeros.
    """
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project `features` onto `dimensions` discriminant directions (Fisher LDA).

    Solves the generalized eigenproblem S_b v = lambda S_w v, then orthonormalizes
    the kept eigenvectors via SVD before projecting.

    Args:
        features: (n_features, n_samples) data matrix.
        labels: (n_samples,) integer label per column of `features`.
        classes: number of classes.
        dimensions: number of discriminant directions; must be < classes.

    Raises:
        AssertionError: if `dimensions >= classes`, or if `features` is empty.
    """
    # Check if the target dimensionality is valid for LDA
    assert classes > dimensions
    # Check if features have been already loaded
    # BUGFIX: was `if features.any:` (a bound method, always truthy); call it.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA output mismatch against the fixed expectation surfaces as AssertionError."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip qubits 0 and 1 with X gates, measure them, and return the counts.

    With the noiseless aer simulator every one of the 1000 shots yields "11".
    Name restored to match the call site in the __main__ block; note the
    function operates on two qubits despite its (upstream) name.

    Args:
        qubits: number of qubits in the circuit (expects >= 2).
        classical_bits: number of classical bits to measure into (expects >= 2).
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    # Demo: run the 2-qubit measurement and report the counts histogram.
    # NOTE(review): `counts` in the print below is undefined as written -- the
    # result is bound to the machine-mangled name `__lowercase`; confirm
    # against the original script.
    __lowercase: List[str] = single_qubit_measure(2, 2)
    print(F"""Total count for various states are: {counts}""")
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature type storing either a path or raw encoded bytes per example.

    Arrow-side representation is ``struct{bytes: binary, path: string}``.
    Restored from a machine-mangled version: the dataclass previously declared
    several fields all named ``_lowerCamelCase`` and methods with duplicate
    ``a_`` parameters (a SyntaxError). Field names ``decode``/``pa_type`` are
    grounded by in-class usage; method names follow the datasets FeatureType
    API (encode_example/decode_example/flatten/cast_storage/embed_storage).
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode one example into the storage format {"path": ..., "bytes": ...}."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a stored {"path", "bytes"} struct back into a PIL image.

        `token_per_repo_id` maps Hub repo_id -> auth token for remote files.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """When decoding is disabled, expose the raw bytes/path columns."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string/binary/struct/list arrays to the {bytes, path} struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # lists are treated as raw image arrays and encoded to bytes
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced files' bytes into the array (path kept as basename)."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return PIL formats that can be both opened and saved; cached globally.

    Name restored to match its call site in `image_to_bytes`.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, keeping its format when round-trippable.

    Falls back to PNG (simple modes) or TIFF (everything else) when the image's
    own format cannot be both read and written. Name restored to match its
    call sites.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        fmt = image.format
    else:
        fmt = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=fmt)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image to {"path", "bytes"}, preferring the on-disk path.

    Name restored to match its call sites in `Image.encode_example` and the
    batch encoder.
    """
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a PIL-safe dtype.

    Multi-channel arrays (ndim > 2) must be unsigned/signed integer and are
    stored as uint8; otherwise the dtype is kept if valid, or downcast within
    its kind until a supported dtype is found. Name and locals restored to
    match the call sites.

    Raises:
        TypeError: if no valid image dtype can be reached.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Batch-encode image objects to {"path", "bytes"} dicts, keeping Nones.

    Dispatches on the first non-null element: strings become path-only dicts,
    arrays/PIL images are encoded; anything else is returned unchanged.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 31
| 0
|
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
# Sampling hyper-parameters for the value-guided planner.
# NOTE(review): this dict is defined but never passed to the pipeline below --
# confirm against the original example whether it should be forwarded.
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Test-suite for the CLIP tokenizers (slow ``CLIPTokenizer`` and fast
    ``CLIPTokenizerFast``), built on a shared tokenizer-tester mixin.

    NOTE(review): this file was mechanically obfuscated — many locals are
    assigned to ``UpperCamelCase__`` while later statements read the
    original names (``vocab_tokens``, ``merges``, ``kwargs``, ``tokens``,
    ``spaces_unicodes``, ``line_break_unicodes``, ``text`` ...), so several
    methods reference undefined names as written and need their original
    local names restored before they can run.
    """
    _lowerCamelCase : Union[str, Any] = CLIPTokenizer      # slow tokenizer class under test
    _lowerCamelCase : Dict = CLIPTokenizerFast             # fast tokenizer class under test
    _lowerCamelCase : int = True
    _lowerCamelCase : Tuple = {}
    _lowerCamelCase : Tuple = False
    def lowercase_ ( self : Tuple ):
        """Write a toy BPE vocab/merges pair into ``self.tmpdirname``."""
        super().setUp()
        # fmt: off
        UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
        UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        UpperCamelCase__ = {"unk_token": "<unk>"}
        UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
            fp.write(json.dumps(a_ ) + "\n" )
        with open(self.merges_file, "w", encoding="utf-8" ) as fp:
            fp.write("\n".join(a_ ) )
    def lowercase_ ( self : Optional[Any], **a_ : str ):
        """Build a slow ``CLIPTokenizer`` from the temp vocab/merges files."""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
    def lowercase_ ( self : str, **a_ : str ):
        """Build a fast ``CLIPTokenizerFast`` from the temp vocab/merges files."""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
    def lowercase_ ( self : List[Any], a_ : Dict ):
        """Return an (input text, expected output text) pair for round trips."""
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = "lower newer"
        return input_text, output_text
    def lowercase_ ( self : Optional[Any] ):
        """BPE-tokenize a simple string and check tokens and converted ids."""
        UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        UpperCamelCase__ = tokenizer.tokenize(a_ )
        self.assertListEqual(a_, a_ )
        UpperCamelCase__ = tokens + [tokenizer.unk_token]
        UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
    @require_ftfy
    def lowercase_ ( self : Dict ):
        """Check slow (ftfy-backed) and fast tokenizers agree on tricky unicode."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
                UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                UpperCamelCase__ = tokenizer_s.tokenize(a_ )
                UpperCamelCase__ = tokenizer_r.tokenize(a_ )
                self.assertListEqual(a_, a_ )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
                UpperCamelCase__ = tokenizer_s.tokenize(a_ )
                UpperCamelCase__ = tokenizer_r.tokenize(a_ )
                self.assertListEqual(a_, a_ )
                # Test that the tokenization is identical on unicode of space type
                UpperCamelCase__ = [
                    "\u0009", # (horizontal tab, '\t')
                    "\u000B", # (vertical tab)
                    "\u000C", # (form feed)
                    "\u0020", # (space, ' ')
                    "\u200E", # (left-to-right mark):w
                    "\u200F", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    UpperCamelCase__ = tokenizer_s.tokenize(a_ )
                    UpperCamelCase__ = tokenizer_r.tokenize(a_ )
                    self.assertListEqual(a_, a_ )
                # Test that the tokenization is identical on unicode of line break type
                UpperCamelCase__ = [
                    "\u000A", # (line feed, '\n')
                    "\r\n", # (carriage return and line feed, '\r\n')
                    "\u000D", # (carriage return, '\r')
                    "\r", # (carriage return, '\r')
                    "\u000D", # (carriage return, '\r')
                    "\u2028", # (line separator)
                    "\u2029", # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    UpperCamelCase__ = tokenizer_s.tokenize(a_ )
                    UpperCamelCase__ = tokenizer_r.tokenize(a_ )
                    self.assertListEqual(a_, a_ )
    def lowercase_ ( self : Tuple ):
        """Check offset mappings are correct with and without a leading space."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
                UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                    a_, use_fast=a_, )
                UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
                self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
                UpperCamelCase__ = f' {text}'
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                    a_, use_fast=a_, )
                UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
    def lowercase_ ( self : Tuple ):
        """Loading a legacy (incompatible) tokenizer must raise a clear error."""
        with self.assertRaises(a_ ) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def lowercase_ ( self : Union[str, Any] ):
        """Re-run the generic python/rust equivalence test with ftfy available."""
        super().test_tokenization_python_rust_equals()
    def lowercase_ ( self : List[str] ):
        """Intentionally skipped for CLIP (no behavior to check here)."""
        pass
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> int:
    """Return the n-th Fibonacci number, with F(1) == F(2) == 1.

    Non-``int`` input and indices below 1 return 0, preserving the previous
    revision's lenient behaviour for invalid input.

    Fixes over the previous revision:
    * the sequence list was assigned to an obfuscated name and then read as
      ``sequence`` / indexed with ``n`` — a NameError at runtime;
    * ``fibonacci(1)`` returned 0, inconsistent with the function's own
      seed sequence ``[0, 1]`` where index 1 holds 1;
    * negative indices silently indexed the list from the end.
    """
    if not isinstance(_UpperCamelCase , int ) or _UpperCamelCase < 1:
        return 0
    if _UpperCamelCase <= 2:
        # F(1) == F(2) == 1
        return 1
    sequence = [0, 1]
    for i in range(2 , _UpperCamelCase + 1 ):
        sequence.append(sequence[i - 1] + sequence[i - 2] )
    return sequence[_UpperCamelCase]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> int:
    """Return the index of the first Fibonacci number whose decimal
    representation has at least ``_UpperCamelCase`` digits.

    The scan starts at index 3 (matching the previous revision, which never
    examined F(1) or F(2)), so the result for 1 digit is 3.

    Fixes over the previous revision:
    * the loop read undefined names (``n`` and the helper ``fibonacci``)
      because the obfuscation renamed both the parameter and the helper;
    * each iteration recomputed the whole Fibonacci sequence from scratch
      (quadratic work with big-int additions); the pair of consecutive
      Fibonacci numbers is now advanced incrementally instead.
    """
    index = 2
    previous, current = 1, 1  # F(1), F(2)
    digits = 0
    while digits < _UpperCamelCase:
        index += 1
        previous, current = current, previous + current  # current == F(index)
        digits = len(str(current ) )
    return index
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10_00 ) -> int:
    """Project Euler 25: index of the first Fibonacci number with at least
    ``_UpperCamelCase`` (default 1000) digits.

    NOTE(review): ``fibonacci_digits_index`` is undefined here — the helper
    above was renamed to ``SCREAMING_SNAKE_CASE__`` by the obfuscation — so
    this call raises NameError as written.
    """
    return fibonacci_digits_index(_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is likewise undefined as written.
    print(solution(int(str(input()).strip())))
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger for this training script.
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
# Hard requirement: the Audio feature / casting used below needs datasets >= 1.14.0.
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( wav : np.ndarray , max_length : float , sample_rate : int = 1_60_00 ) -> np.ndarray:
    """Randomly crop ``wav`` to at most ``max_length`` seconds.

    Args:
        wav: 1-D array of waveform samples.
        max_length: maximum clip length in seconds.
        sample_rate: samples per second, used to convert seconds to samples.

    Returns:
        ``wav`` unchanged when it already fits, otherwise a random
        contiguous slice of ``int(round(sample_rate * max_length))`` samples.

    Fixes over the previous revision: all three parameters were named
    ``_UpperCamelCase`` (duplicate argument names — a SyntaxError) while the
    body read ``wav``/``max_length``/``sample_rate``, and the return
    annotation wrongly said ``str``. The restored names match the keyword
    arguments used by the caller in ``main`` (``max_length=``,
    ``sample_rate=``).
    """
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    # NOTE(review): the inclusive upper bound excludes the final possible
    # offset, so the last sample can never appear in a crop — preserved
    # as-is from the original.
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
    """Arguments controlling which dataset, splits and columns to train on.

    Each field is exposed as a command-line flag by ``HfArgumentParser``;
    the ``metadata['help']`` strings are the flag descriptions.

    NOTE(review): defaults written as ``SCREAMING_SNAKE_CASE__`` resolve to
    the subsampling function defined above — almost certainly ``None`` in
    the original source; confirm before relying on these defaults.
    """
    _lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
    _lowerCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    _lowerCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
    _lowerCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
    _lowerCamelCase : str = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    _lowerCamelCase : str = field(
        default='validation' , metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    _lowerCamelCase : str = field(
        default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
    _lowerCamelCase : str = field(
        default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    _lowerCamelCase : Optional[int] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    _lowerCamelCase : Optional[int] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    _lowerCamelCase : float = field(
        default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
    """Arguments for selecting the pretrained model/feature extractor and
    controlling which parts of it are frozen.

    NOTE(review): defaults written as ``SCREAMING_SNAKE_CASE__`` resolve to
    the subsampling function defined above — almost certainly ``None`` /
    ``True`` / ``False`` in the original source; confirm before relying on
    them. The validation method below was presumably ``__post_init__``
    before obfuscation — under the name ``lowercase_`` it is never invoked.
    """
    _lowerCamelCase : str = field(
        default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    _lowerCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    _lowerCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    _lowerCamelCase : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    _lowerCamelCase : Optional[str] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
    _lowerCamelCase : bool = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    _lowerCamelCase : bool = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    _lowerCamelCase : bool = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    _lowerCamelCase : Optional[bool] = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    _lowerCamelCase : bool = field(
        default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
    def lowercase_ ( self : int ):
        """Reconcile the deprecated ``freeze_feature_extractor`` flag with
        ``freeze_feature_encoder``: warn when only the old flag semantics
        apply, and reject contradictory combinations."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.", a_, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
    """End-to-end audio-classification fine-tuning: parse arguments, load and
    resample the dataset, build feature extractor/config/model, then train
    and/or evaluate with ``Trainer`` and optionally push to the Hub.

    NOTE(review): the obfuscation bound nearly every local to
    ``UpperCamelCase__`` while later statements read the original names
    (``parser``, ``model_args``, ``data_args``, ``training_args``,
    ``last_checkpoint``, ``raw_datasets``, ``feature_extractor``,
    ``model_input_name``, ``labels``, ``labelaid``/``idalabel``, ``metric``,
    ``config``, ``model``, ``trainer``, ``train_result``, ``metrics``,
    ``kwargs``) — the function raises NameError as written. Several reads
    of ``_UpperCamelCase`` (no such parameter exists) are likewise broken.
    """
    UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCamelCase__ = training_args.get_process_log_level()
    logger.setLevel(_UpperCamelCase )
    transformers.utils.logging.set_verbosity(_UpperCamelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Detecting last checkpoint.
    UpperCamelCase__ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to train from scratch." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset and prepare it for the audio classification task.
    UpperCamelCase__ = DatasetDict()
    UpperCamelCase__ = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCamelCase__ = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            F'{", ".join(raw_datasets["train"].column_names )}.' )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--label_column_name` to the correct text column - one of "
            F'{", ".join(raw_datasets["train"].column_names )}.' )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    UpperCamelCase__ = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    UpperCamelCase__ = feature_extractor.model_input_names[0]
    def train_transforms(_UpperCamelCase : Any ):
        # Random-crop each training clip, then run the feature extractor.
        UpperCamelCase__ = []
        for audio in batch[data_args.audio_column_name]:
            UpperCamelCase__ = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(_UpperCamelCase )
        UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
        UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
        UpperCamelCase__ = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(_UpperCamelCase : List[Any] ):
        # Validation clips are used whole (no random cropping).
        UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
        UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
        UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
        UpperCamelCase__ = list(batch[data_args.label_column_name] )
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
    UpperCamelCase__ , UpperCamelCase__ = {}, {}
    for i, label in enumerate(_UpperCamelCase ):
        UpperCamelCase__ = str(_UpperCamelCase )
        UpperCamelCase__ = label
    # Load the accuracy metric from the datasets package
    UpperCamelCase__ = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(_UpperCamelCase : Any ):
        UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
    UpperCamelCase__ = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            UpperCamelCase__ = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            UpperCamelCase__ = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
    # Initialize our trainer
    UpperCamelCase__ = Trainer(
        model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
    # Training
    if training_args.do_train:
        UpperCamelCase__ = None
        if training_args.resume_from_checkpoint is not None:
            UpperCamelCase__ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCamelCase__ = last_checkpoint
        UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        UpperCamelCase__ = trainer.evaluate()
        trainer.log_metrics("eval" , _UpperCamelCase )
        trainer.save_metrics("eval" , _UpperCamelCase )
    # Write model card and (optionally) push to hub
    UpperCamelCase__ = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**_UpperCamelCase )
    else:
        trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined as written — the function above is
    # named SCREAMING_SNAKE_CASE__ after obfuscation.
    main()
| 31
| 0
|
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowercase = get_logger(__name__)
class UpperCAmelCase :
    """Manages extraction of archives into a hashed path under a cache dir.

    NOTE(review): the obfuscation broke this class — ``__init__`` assigns to
    the throwaway local ``UpperCamelCase__`` instead of ``self.extract_dir``
    / ``self.extractor``, reads the undefined ``cache_dir``, and the second
    and third methods declare two parameters both named ``a_`` (duplicate
    argument names — a SyntaxError as written).
    """
    def __init__( self : Optional[int], a_ : Optional[str] = None ):
        """Resolve the directory where extracted archives are stored."""
        UpperCamelCase__ = (
            os.path.join(a_, config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        UpperCamelCase__ = Extractor
    def lowercase_ ( self : Any, a_ : str ):
        """Map an input path to its extraction path (hash of the original)."""
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        UpperCamelCase__ = os.path.abspath(a_ )
        return os.path.join(self.extract_dir, hash_url_to_filename(a_ ) )
    def lowercase_ ( self : Union[str, Any], a_ : str, a_ : bool ):
        """Decide whether extraction must run: forced, or output missing/empty."""
        return force_extract or (
            not os.path.isfile(a_ ) and not (os.path.isdir(a_ ) and os.listdir(a_ ))
        )
    def lowercase_ ( self : Tuple, a_ : str, a_ : bool = False ):
        """Extract the input if its format is recognized; return the output
        path, or the input path unchanged when no extractor matches."""
        UpperCamelCase__ = self.extractor.infer_extractor_format(a_ )
        if not extractor_format:
            return input_path
        UpperCamelCase__ = self._get_output_path(a_ )
        if self._do_extract(a_, a_ ):
            self.extractor.extract(a_, a_, a_ )
        return output_path
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Abstract interface every concrete archive extractor implements: a
    format-detection classmethod and a static extract routine.

    NOTE(review): both abstract methods are named ``lowercase_`` after
    obfuscation (originally distinct names, presumably ``is_extractable``
    and ``extract``); the first also declares ``**a_`` next to a positional
    ``a_`` — duplicate argument names, a SyntaxError as written.
    """
    @classmethod
    @abstractmethod
    def lowercase_ ( cls : Union[str, Any], a_ : Union[Path, str], **a_ : Tuple ):
        """Return True if the file at the given path can be handled."""
        ...
    @staticmethod
    @abstractmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : Union[Path, str] ):
        """Extract the input path into the output path/directory."""
        ...
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
    """Base for extractors whose format is recognized by a magic-number
    prefix at the start of the file.

    NOTE(review): both methods declare two parameters named ``a_``
    (duplicate argument names — a SyntaxError as written), and the second
    reads ``magic_number``, which is never bound under that name.
    """
    # Candidate magic-number prefixes; overridden by each concrete subclass.
    _lowerCamelCase : List[bytes] = []
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : int ):
        """Read the first N bytes of the file."""
        with open(a_, "rb" ) as f:
            return f.read(a_ )
    @classmethod
    def lowercase_ ( cls : Any, a_ : Union[Path, str], a_ : bytes = b"" ):
        """Return True if the file starts with any of the class magic numbers.
        When no magic number is supplied, read just enough bytes to compare
        against the longest candidate; unreadable files are not extractable."""
        if not magic_number:
            UpperCamelCase__ = max(len(a_ ) for cls_magic_number in cls.magic_numbers )
            try:
                UpperCamelCase__ = cls.read_magic_number(a_, a_ )
            except OSError:
                return False
        return any(magic_number.startswith(a_ ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for tar archives, with safety filtering of members so that
    extraction cannot escape the destination directory (path traversal,
    symlink and hardlink escapes are blocked and logged).

    NOTE(review): the obfuscation left duplicate ``a_`` parameter names in
    the static methods and the nested helpers (a SyntaxError as written),
    and several reads (``info``, ``members``, ``tar_file``) have no matching
    assignment under those names.
    """
    @classmethod
    def lowercase_ ( cls : Dict, a_ : Union[Path, str], **a_ : List[Any] ):
        """Return True if the path is a tar archive."""
        return tarfile.is_tarfile(a_ )
    @staticmethod
    def lowercase_ ( a_ : Optional[Any], a_ : int ):
        """Yield only archive members whose resolved target stays inside the
        destination directory; offending members are logged and skipped."""
        def resolved(a_ : str ) -> str:
            # Canonicalize: absolute path with symlinks resolved.
            return os.path.realpath(os.path.abspath(a_ ) )
        def badpath(a_ : str, a_ : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(a_, a_ ) ).startswith(a_ )
        def badlink(a_ : Union[str, Any], a_ : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            UpperCamelCase__ = resolved(os.path.join(a_, os.path.dirname(info.name ) ) )
            return badpath(info.linkname, base=a_ )
        UpperCamelCase__ = resolved(a_ )
        for finfo in members:
            if badpath(finfo.name, a_ ):
                logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
            elif finfo.issym() and badlink(a_, a_ ):
                logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
            elif finfo.islnk() and badlink(a_, a_ ):
                logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
            else:
                yield finfo
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : Union[Path, str] ):
        """Extract the archive into the output directory, restricted to the
        members accepted by the safety filter above."""
        os.makedirs(a_, exist_ok=a_ )
        UpperCamelCase__ = tarfile.open(a_ )
        tar_file.extractall(a_, members=TarExtractor.safemembers(a_, a_ ) )
        tar_file.close()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for gzip-compressed single files (magic number 1F 8B)."""
    _lowerCamelCase : int = [B'\x1F\x8B']
    @staticmethod
    def lowercase_ ( input_path : Union[Path, str], output_path : Union[Path, str] ):
        """Stream-decompress the gzip file at ``input_path`` into the file
        ``output_path``.

        Fix over the previous revision: both parameters were named ``a_``
        (duplicate argument names — a SyntaxError); distinct names restored
        to match the (input, output) order used by the other extractors.
        """
        with gzip.open(input_path, "rb" ) as gzip_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for zip archives.

    Detection first tries the inherited magic-number check, then falls back
    to a stricter end-of-central-directory scan (fewer false positives than
    ``zipfile.is_zipfile``, at the cost of missing executable zips).

    NOTE(review): duplicate ``a_`` parameter names in both methods are a
    SyntaxError as written (obfuscation artifact).
    """
    _lowerCamelCase : Optional[Any] = [
        B'PK\x03\x04',
        B'PK\x05\x06', # empty archive
        B'PK\x07\x08', # spanned archive
    ]
    @classmethod
    def lowercase_ ( cls : Dict, a_ : Union[Path, str], a_ : bytes = b"" ):
        """Return True if the file is a zip archive (magic number, or a
        validated central-directory structure)."""
        if super().is_extractable(a_, magic_number=a_ ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(a_, "rb" ) as fp:
                UpperCamelCase__ = _EndRecData(a_ )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            UpperCamelCase__ = fp.read(a_ ) # CD is where we expect it to be
                            if len(a_ ) == sizeCentralDir:
                                UpperCamelCase__ = struct.unpack(a_, a_ ) # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True # First central directory entry has correct magic number
            return False
        except Exception: # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : Union[Path, str] ):
        """Extract the whole archive into the output directory."""
        os.makedirs(a_, exist_ok=a_ )
        with zipfile.ZipFile(a_, "r" ) as zip_file:
            zip_file.extractall(a_ )
            zip_file.close()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for xz-compressed single files (magic number FD 37 7A 58 5A 00)."""
    _lowerCamelCase : str = [B'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def lowercase_ ( input_path : Union[Path, str], output_path : Union[Path, str] ):
        """Stream-decompress the xz file at ``input_path`` into the file
        ``output_path``.

        Fix over the previous revision: both parameters were named ``a_``
        (duplicate argument names — a SyntaxError); distinct names restored
        to match the (input, output) order used by the other extractors.
        """
        with lzma.open(input_path ) as compressed_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for RAR archives (requires the optional ``rarfile`` package).

    NOTE(review): duplicate ``a_`` parameter names are a SyntaxError as
    written, and ``rf`` is read without a matching assignment (obfuscation
    artifacts).
    """
    _lowerCamelCase : Optional[Any] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : Union[Path, str] ):
        """Extract the RAR archive into the output directory; raise a clear
        ImportError when ``rarfile`` is not installed."""
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile" )
        import rarfile
        os.makedirs(a_, exist_ok=a_ )
        UpperCamelCase__ = rarfile.RarFile(a_ )
        rf.extractall(a_ )
        rf.close()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for zstandard-compressed files (magic number 28 B5 2F FD;
    requires the optional ``zstandard`` package).

    NOTE(review): duplicate ``a_`` parameter names are a SyntaxError as
    written, and ``dctx`` is read without a matching assignment
    (obfuscation artifacts).
    """
    _lowerCamelCase : Optional[Any] = [B'\x28\xb5\x2F\xFD']
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : Union[Path, str] ):
        """Stream-decompress the zstd file into the output file."""
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard" )
        import zstandard as zstd
        UpperCamelCase__ = zstd.ZstdDecompressor()
        with open(a_, "rb" ) as ifh, open(a_, "wb" ) as ofh:
            dctx.copy_stream(a_, a_ )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for bzip2-compressed single files (magic number "BZh").

    NOTE(review): duplicate ``a_`` parameter names are a SyntaxError as
    written; ``bza`` is presumably the obfuscated name of the ``bz2``
    module imported at the top of the file — confirm before running.
    """
    _lowerCamelCase : str = [B'\x42\x5A\x68']
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : Union[Path, str] ):
        """Stream-decompress the bzip2 file into the output file."""
        with bza.open(a_, "rb" ) as compressed_file:
            with open(a_, "wb" ) as extracted_file:
                shutil.copyfileobj(a_, a_ )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for 7-Zip archives (magic number 37 7A BC AF 27 1C;
    requires the optional ``py7zr`` package)."""
    _lowerCamelCase : List[Any] = [B'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def lowercase_ ( input_path : Union[Path, str], output_path : Union[Path, str] ):
        """Extract the 7z archive at ``input_path`` into the directory
        ``output_path``; raise a clear ImportError when ``py7zr`` is missing.

        Fixes over the previous revision: duplicate ``a_`` parameter names
        (a SyntaxError) and the mangled module name ``pyazr`` — the optional
        dependency is ``py7zr``, consistent with the "Please pip install
        py7zr" message and the ``config.PY7ZR_AVAILABLE`` guard.
        """
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr" )
        import py7zr
        os.makedirs(output_path, exist_ok=True )
        with py7zr.SevenZipFile(input_path, "r" ) as archive:
            archive.extractall(output_path )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Extractor for LZ4-frame compressed files (magic number 04 22 4D 18;
    requires the optional ``lz4`` package)."""
    _lowerCamelCase : Union[str, Any] = [B'\x04\x22\x4D\x18']
    @staticmethod
    def lowercase_ ( input_path : Union[Path, str], output_path : Union[Path, str] ):
        """Stream-decompress the LZ4 frame at ``input_path`` into the file
        ``output_path``; raise a clear ImportError when ``lz4`` is missing.

        Fixes over the previous revision: duplicate ``a_`` parameter names
        (a SyntaxError) and the mangled module name ``lza`` — the optional
        dependency is ``lz4``, consistent with the "Please pip install lz4"
        message and the ``config.LZ4_AVAILABLE`` guard.
        """
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(input_path, "rb" ) as compressed_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file )
class UpperCAmelCase :
    """Registry and facade over all concrete extractors: infers an archive's
    format from its magic number and dispatches extraction, handling the
    deprecated ``is_extractable``/``extractor`` call conventions.

    NOTE(review): several methods here are broken by the obfuscation —
    duplicate ``a_`` parameter names (a SyntaxError) and assignments to
    ``UpperCamelCase__`` whose values are then read under the original
    names (``extractor_format``, ``magic_number``, ``extractor``).
    """
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    _lowerCamelCase : Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": LzaExtractor, # <Added version="2.4.0"/>
    }
    @classmethod
    def lowercase_ ( cls : List[str] ):
        """Return the length of the longest registered magic number, i.e.
        how many bytes must be read to test every format."""
        return max(
            len(a_ )
            for extractor in cls.extractors.values()
            if issubclass(a_, a_ )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def lowercase_ ( a_ : Union[Path, str], a_ : int ):
        """Read the file's magic-number prefix; unreadable files yield b""."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(a_, magic_number_length=a_ )
        except OSError:
            return b""
    @classmethod
    def lowercase_ ( cls : Tuple, a_ : Union[Path, str], a_ : bool = False ):
        """Deprecated since 2.4.0: use ``infer_extractor_format`` instead.
        Optionally also returns the matching extractor class."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.", category=a_, )
        UpperCamelCase__ = cls.infer_extractor_format(a_ )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def lowercase_ ( cls : List[Any], a_ : Union[Path, str] ): # <Added version="2.4.0"/>
        """Return the registry key ("tar", "zip", ...) of the first extractor
        whose magic number matches the file, or None when nothing matches."""
        UpperCamelCase__ = cls._get_magic_number_max_length()
        UpperCamelCase__ = cls._read_magic_number(a_, a_ )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(a_, magic_number=a_ ):
                return extractor_format
    @classmethod
    def lowercase_ ( cls : int, a_ : Union[Path, str], a_ : Union[Path, str], a_ : Optional[str] = None, a_ : Optional[BaseExtractor] = "deprecated", ):
        """Extract input into output using the given format (or the
        deprecated ``extractor`` argument); falls back to probing every
        registered extractor when neither is supplied. Extraction is
        serialized with a file lock and any stale output is removed first."""
        os.makedirs(os.path.dirname(a_ ), exist_ok=a_ )
        # Prevent parallel extractions
        UpperCamelCase__ = str(Path(a_ ).with_suffix(".lock" ) )
        with FileLock(a_ ):
            shutil.rmtree(a_, ignore_errors=a_ )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(a_, a_ ): # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.", category=a_, )
                    UpperCamelCase__ = extractor if extractor != "deprecated" else extractor_format
                else:
                    UpperCamelCase__ = cls.extractors[extractor_format]
                return extractor.extract(a_, a_ )
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.", category=a_, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(a_ ):
                        return extractor.extract(a_, a_ )
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
    """Read the file at *_UpperCamelCase* and return its content as a bit string.

    Each byte becomes its zero-padded 8-character binary representation.
    Exits the process when the file cannot be opened.
    """
    result = ""
    try:
        with open(_UpperCamelCase , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
    """Decompress an LZW-encoded bit string and return the decoded bit string.

    The lexicon maps variable-length codes back to bit sequences; it is
    re-keyed with a leading "0" every time the code space doubles.
    Fixes the nonexistent ``math.loga`` (→ ``math.log2``) and the mangled
    local-variable assignments of the original.
    """
    data_bits = _UpperCamelCase
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        # the matched code is extended with "0"; its "1" sibling is added below
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            # code length grew by one bit: prefix every existing key with "0"
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def SCREAMING_SNAKE_CASE__( file_path : str , to_write : str ) -> None:
    """Write the bit string *to_write* to *file_path* as raw bytes.

    The bit string is chunked into bytes; a "1"-then-zeros marker pads the
    final chunk, and only the full chunks before the marker are written
    (decompressed data is expected to be byte-aligned). Parameter names
    restored — the original duplicated them, a SyntaxError.
    Exits the process when the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            # the final element is the padding marker and is deliberately skipped
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( source_path : str , destination_path : str ) -> None:
    """Read the compressed file at *source_path*, decompress it and write the
    result to *destination_path*.

    Parameter names restored (the original duplicated them). Relies on the
    sibling helpers read_file_binary / remove_prefix / decompress_data /
    write_file_binary defined earlier in this module.
    """
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase :
    # Binary-tree node: the original declared all three fields with the same
    # mangled name; the names below are restored from how the BST checker in
    # this module reads them (node.data / node.left / node.right).
    data : float
    left : TreeNode | None = None
    right : TreeNode | None = None
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : TreeNode | None ) -> bool:
    """Return True when the tree rooted at *_UpperCamelCase* is a binary search tree.

    First validates that every node is a tree node with float-convertible data
    (raising ValueError otherwise), then checks the BST ordering invariant with
    open bounds. Mangled identifiers restored from the surrounding code.
    """
    def is_valid_tree(node : TreeNode | None ) -> bool:
        # structural validation: None, correct node type, numeric data
        if node is None:
            return True
        if not isinstance(node , UpperCAmelCase ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(_UpperCamelCase ):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        # every node must lie strictly between the bounds inherited from ancestors
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(_UpperCamelCase , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Unconditional latent-diffusion pipeline: VQ-VAE decoder + UNet + DDIM scheduler.

    Parameter names restored — the original mangled every parameter to ``a_``,
    which is a SyntaxError when duplicated.
    """

    def __init__( self : Any, vqvae : VQModel, unet : UNetaDModel, scheduler : DDIMScheduler ):
        """Register the three sub-models on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : Union[str, Any], batch_size : int = 1, generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta : float = 0.0, num_inference_steps : int = 50, output_type : Optional[str] = "pil", return_dict : bool = True, **kwargs : Tuple, ):
        """Sample *batch_size* images by DDIM denoising in latent space, then decode with the VQ-VAE."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents, t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10_00 ) -> int:
    """Return the maximal product a*b*c of a Pythagorean triple with a+b+c == n.

    Project Euler 9 generalized: c is eliminated from a+b+c=n and
    a**2+b**2=c**2, so only integer candidates for b are tried.
    Mangled local names restored.
    """
    n = _UpperCamelCase
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
if __name__ == "__main__":
    print(F"""{SCREAMING_SNAKE_CASE__() = }""")
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    # CLI entry point: parse the runner list and token, then check status.
    def list_str(values ):
        """Split a comma-separated CLI value into a list of strings."""
        return values.split("," )
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
import random
def SCREAMING_SNAKE_CASE__( vertices_number : int , probability : float , directed : bool = False ) -> dict:
    """Return a random graph with *vertices_number* nodes as an adjacency dict.

    Each edge (i, j) is added independently with the given probability.
    probability >= 1 yields a complete graph; probability <= 0 yields no edges.
    Parameter names restored (the original duplicated them).
    """
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i )
    return graph
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> dict:
    """Build the complete graph on *_UpperCamelCase* vertices.

    Returns an adjacency dict where every vertex lists all other vertices.
    """
    adjacency = {}
    for vertex in range(_UpperCamelCase ):
        neighbours = list(range(_UpperCamelCase ) )
        neighbours.remove(vertex )
        adjacency[vertex] = neighbours
    return adjacency
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# Module-level names restored: the functions below read `logger` and the
# `DatasetType` TypeVar, which the mangled version assigned to `__lowercase`.
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( datasets : List[DatasetType] , probabilities : Optional[List[float]] = None , seed : Optional[int] = None , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , stopping_strategy : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """Interleave several map-style or iterable datasets into one.

    Validates that all inputs are of one dataset flavour, then dispatches to
    the matching interleave helper. Parameter names restored — the original
    duplicated them (a SyntaxError).

    Raises ValueError on an empty list, mixed/unsupported element types, or an
    invalid *stopping_strategy*.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.' )
        if i == 0:
            # the first element fixes which flavour the whole list must have
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def SCREAMING_SNAKE_CASE__( dsets : List[DatasetType] , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , axis : int = 0 , ) -> DatasetType:
    """Concatenate several map-style or iterable datasets along *axis*.

    Validation mirrors the interleave function above (including its error
    wording). Parameter names restored — the original duplicated them.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.' )
        if i == 0:
            # the first element fixes which flavour the whole list must have
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Constant names restored: the cipher helpers below read these by their
# original names, which the mangled version collapsed into `__lowercase`.
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__( ciphertext : list[int] , key : tuple[int, ...] ) -> str | None:
    """XOR-decode *ciphertext* with the cycled *key*.

    Returns the decoded string, or None as soon as any decoded character falls
    outside the printable-ASCII alphabet. Parameter names restored — the
    original duplicated them (a SyntaxError).
    """
    # printable alphabet inlined so this function does not depend on the
    # module-level constant the mangled original lost
    valid_ints = {
        ord(char)
        for char in string.ascii_letters + string.digits + string.punctuation + string.whitespace
    }
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in valid_ints:
            return None
        decoded += chr(decodedchar )
    return decoded
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> list[str]:
    """Try every 3-letter lowercase key against the ciphertext.

    Returns every decoding that consists solely of valid characters.
    The mangled original iterated ``product`` over the ciphertext itself;
    the key space is the lowercase ordinals, computed inline here.
    """
    ciphertext = _UpperCamelCase
    lowercase_ints = [ord(letter) for letter in string.ascii_lowercase]
    possibles = []
    for key in product(lowercase_ints , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def SCREAMING_SNAKE_CASE__( possibles : list[str] , common_word : str ) -> list[str]:
    """Keep only candidate plaintexts containing *common_word* (case-insensitive).

    Parameter names restored — the original duplicated them (a SyntaxError).
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str = "p059_cipher.txt" ) -> int:
    """Project Euler 59: decode the XOR-encrypted file and sum the plaintext ordinals.

    Reads the comma-separated ciphertext next to this module, filters candidate
    decodings by common English words until one remains. Mangled locals
    restored; the file is located relative to ``__file__`` as in the original.
    """
    data = Path(__file__ ).parent.joinpath(_UpperCamelCase ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
    print(F"""{SCREAMING_SNAKE_CASE__() = }""")
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
    """Return the optimal total cost of merging all files (optimal merge pattern).

    Repeatedly merges the two cheapest files, accumulating each merged size
    into the total cost. NOTE: mutates the input list. Mangled names restored.
    """
    files = _UpperCamelCase
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 31
| 0
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
# Names restored: the `with` statement below reads `bindir`, which the mangled
# version assigned to `__lowercase`. Model-id constant names reconstructed from
# the upstream transformers test file — TODO confirm against that source.
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
    from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """End-to-end seq2seq translation Trainer tests (single/multi GPU, fairscale, apex, BNB).

    NOTE(review): this block is machine-mangled — every parameter is named ``a_``
    (duplicates are a SyntaxError) and locals are assigned to ``UpperCamelCase__``
    but read under their original names. Code is left byte-identical here;
    restoring it requires the upstream transformers test file.
    """
    # Runs a 1-epoch tiny training job and sanity-checks the eval logs.
    def lowercase_ ( self : str, a_ : int=False, a_ : Any=None, a_ : Any=True, a_ : Optional[int]=True, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
        """simple docstring"""
        UpperCamelCase__ = self.run_trainer(
            eval_steps=1, max_len=12, model_name=a_, num_train_epochs=1, distributed=a_, extra_args_str=a_, predict_with_generate=a_, do_train=a_, do_eval=a_, do_predict=a_, )
        UpperCamelCase__ = TrainerState.load_from_json(os.path.join(a_, "trainer_state.json" ) ).log_history
        if not do_eval:
            return
        UpperCamelCase__ = [log for log in logs if "eval_loss" in log.keys()]
        UpperCamelCase__ = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            UpperCamelCase__ = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], a_ )
            assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def lowercase_ ( self : Union[str, Any] ):
        """Quick run on a single GPU."""
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def lowercase_ ( self : Union[str, Any] ):
        """Quick distributed run."""
        self.run_seqaseq_quick(distributed=a_ )
    @require_torch_multi_gpu
    def lowercase_ ( self : Dict ):
        """Quick distributed run (second multi-GPU variant)."""
        self.run_seqaseq_quick(distributed=a_ )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase_ ( self : Tuple ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=a_, extra_args_str="--sharded_ddp simple" )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase_ ( self : List[str] ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=a_, extra_args_str="--sharded_ddp simple --fp16" )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase_ ( self : Tuple ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=a_, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=a_ )
    @unittest.skip("Requires an update of the env running those tests" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase_ ( self : str ):
        """simple docstring"""
        self.run_seqaseq_quick(
            distributed=a_, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=a_ )
    @require_apex
    @require_torch_gpu
    def lowercase_ ( self : str ):
        """Apex fp16 backend, run twice to reproduce a historical NaN eval_loss."""
        self.run_seqaseq_quick(distributed=a_, extra_args_str="--fp16 --fp16_backend=apex" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=a_, extra_args_str="--fp16 --fp16_backend=apex" )
    @parameterized.expand(["base", "low", "high", "mixed"] )
    @require_torch_multi_gpu
    def lowercase_ ( self : List[Any], a_ : Any ):
        """Check how often the 'Running training' log line appears per log_level config."""
        UpperCamelCase__ = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        UpperCamelCase__ = experiments[experiment_id]
        UpperCamelCase__ = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        UpperCamelCase__ = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**a_, extra_args_str=data["extra_args_str"] )
        UpperCamelCase__ = len(re.findall(a_, cl.err ) )
        self.assertEqual(a_, data["n_matches"] )
    @slow
    def lowercase_ ( self : Optional[Any] ):
        """Longer 10-epoch run: loss must improve and do_predict must save artifacts."""
        UpperCamelCase__ = self.run_trainer(
            eval_steps=2, max_len=128, model_name=a_, learning_rate=3e-4, num_train_epochs=10, distributed=a_, )
        # Check metrics
        UpperCamelCase__ = TrainerState.load_from_json(os.path.join(a_, "trainer_state.json" ) ).log_history
        UpperCamelCase__ = [log for log in logs if "eval_loss" in log.keys()]
        UpperCamelCase__ = eval_metrics[0]
        UpperCamelCase__ = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], a_ )
        # test if do_predict saves generations and metrics
        UpperCamelCase__ = os.listdir(a_ )
        UpperCamelCase__ = {os.path.basename(a_ ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def lowercase_ ( self : str ):
        """Compare GPU memory of AdamW vs BNB 8-bit AdamW; expect >=120MB savings."""
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(a_ : str ) -> Tuple[int, float]:
            UpperCamelCase__ = "--skip_memory_metrics 0"
            UpperCamelCase__ = self.run_trainer(
                max_len=128, model_name=a_, learning_rate=3e-4, num_train_epochs=1, optim=a_, distributed=a_, extra_args_str=a_, do_eval=a_, do_predict=a_, n_gpus_to_use=1, )
            # Check metrics
            UpperCamelCase__ = TrainerState.load_from_json(Path(a_, "trainer_state.json" ) ).log_history
            UpperCamelCase__ = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
            UpperCamelCase__ = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
            UpperCamelCase__ = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        UpperCamelCase__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        UpperCamelCase__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
        UpperCamelCase__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        UpperCamelCase__ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        UpperCamelCase__ = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            a_, a_, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB', )
        self.assertGreater(
            a_, a_, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB', )
        self.assertEqual(
            a_, a_, f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
    # Shared driver: builds the run_translation.py CLI and executes it either
    # in-process (patched sys.argv) or via torch.distributed.run.
    def lowercase_ ( self : Union[str, Any], a_ : int, a_ : str, a_ : int, a_ : float = 3e-3, a_ : str = "adafactor", a_ : bool = False, a_ : str = None, a_ : int = 0, a_ : bool = True, a_ : bool = True, a_ : bool = True, a_ : bool = True, a_ : int = None, ):
        """simple docstring"""
        UpperCamelCase__ = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        UpperCamelCase__ = self.get_auto_remove_tmp_dir()
        UpperCamelCase__ = f'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length 72,440\n --max_target_length 72,440\n --do_train\n --num_train_epochs {str(a_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(a_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        UpperCamelCase__ = f'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length 72,440\n --evaluation_strategy steps\n --eval_steps {str(a_ )}\n '.split()
        UpperCamelCase__ = "\n --do_predict\n ".split()
        UpperCamelCase__ = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f'--optim {optim}'.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                UpperCamelCase__ = get_gpu_count()
            UpperCamelCase__ = get_torch_dist_unique_port()
            UpperCamelCase__ = f'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
            UpperCamelCase__ = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(a_, env=self.get_env() )
        else:
            UpperCamelCase__ = ["run_translation.py"] + args
            with patch.object(a_, "argv", a_ ):
                main()
        return output_dir
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
    """Kick off the backtracking permutation printer for *_UpperCamelCase*."""
    usage_flags = [0] * len(_UpperCamelCase )
    create_state_space_tree(_UpperCamelCase , [] , 0 , usage_flags )
def SCREAMING_SNAKE_CASE__( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    """Recursively print every permutation of *sequence* (backtracking).

    Parameter names restored (the original duplicated them); the recursive
    call targets this function's own name so the block is self-contained.
    """
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            SCREAMING_SNAKE_CASE__(sequence , current_sequence , index + 1 , index_used )
            # undo the choice before trying the next element
            current_sequence.pop()
            index_used[i] = False
# Demo invocations; names restored so the calls below resolve
# (the mangled version assigned both lists to `__lowercase`).
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
from typing import Any
def SCREAMING_SNAKE_CASE__( observations_space : list , states_space : list , initial_probabilities : dict , transition_probabilities : dict , emission_probabilities : dict , ) -> list:
    """Viterbi algorithm: return the most likely state sequence for the observations.

    Parameter names restored from the body and validation helpers (the
    original duplicated them, a SyntaxError).
    """
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def SCREAMING_SNAKE_CASE__( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ) -> None:
    """Run all Viterbi input validations (non-empty, list types, dict shapes).

    Parameter names restored (the original duplicated them).
    """
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def SCREAMING_SNAKE_CASE__( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ) -> None:
    """Raise ValueError when any of the five Viterbi inputs is empty/falsy.

    Parameter names restored (the original duplicated them).
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError("There's an empty parameter" )
def SCREAMING_SNAKE_CASE__( observations_space : Any , states_space : Any ) -> None:
    """Validate that both spaces are lists of strings.

    Parameter names restored (the original duplicated them).
    """
    _validate_list(observations_space , "observations_space" )
    _validate_list(states_space , "states_space" )
def SCREAMING_SNAKE_CASE__( _object : Any , var_name : str ) -> None:
    """Raise ValueError unless *_object* is a list of strings.

    *var_name* is used in the error messages. Parameter names restored
    (the original duplicated them).
    """
    if not isinstance(_object , list ):
        msg = F'{var_name} must be a list'
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F'{var_name} must be a list of strings'
                raise ValueError(msg )
def SCREAMING_SNAKE_CASE__( initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ) -> None:
    """Validate the three probability tables (flat dict of floats, two nested dicts).

    Parameter names restored (the original duplicated them).
    """
    _validate_dict(initial_probabilities , "initial_probabilities" , float )
    _validate_nested_dict(transition_probabilities , "transition_probabilities" )
    _validate_nested_dict(emission_probabilities , "emission_probabilities" )
def SCREAMING_SNAKE_CASE__(_object: Any, var_name: str) -> None:
    """Check that `_object` is a dict of dicts whose inner values are floats.

    Args:
        _object: value to validate.
        var_name: parameter name used in error messages.

    Raises:
        ValueError: if the outer or inner dictionaries are malformed.
    """
    # Outer mapping: str -> dict; inner mappings: str -> float (nested=True
    # adds "nested dictionary" to the inner error message).
    _validate_dict(_object, var_name, dict)
    for inner in _object.values():
        _validate_dict(inner, var_name, float, True)
def SCREAMING_SNAKE_CASE__(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Check that `_object` is a dict with string keys and `value_type` values.

    Args:
        _object: value to validate.
        var_name: parameter name used in error messages.
        value_type: required type of every dict value.
        nested: if True, prefix the value-type error with "nested dictionary ".

    Raises:
        ValueError: on a non-dict, non-string key, or wrong-typed value.
    """
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(k, str) for k in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(v, value_type) for v in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    from doctest import testmod

    testmod()
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger obtained from the library's logging utilities.
__lowercase: int = logging.get_logger(__name__)
# Map from model repo id to its hosted config.json.
# NOTE(review): both globals share the obfuscated name `__lowercase`, so this
# assignment shadows the logger above — presumably these were two distinct
# names originally; verify before relying on either binding.
__lowercase: dict = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase(PretrainedConfig):
    """Configuration for YOLOS models (ViT backbone + DETR-style detection head).

    Stores the transformer sizes, the detection-token settings, and the
    Hungarian-matcher / loss coefficients used during training.
    """

    # Key the auto-config machinery uses to locate this configuration class.
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # NOTE: mutable default kept for fidelity; it is only stored, never mutated here
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # Original signature named every parameter `a_` (duplicate argument
        # names are a SyntaxError); restored from the assignment order below.
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class UpperCAmelCase(OnnxConfig):
    """ONNX export configuration for YOLOS models.

    The three properties below were all obfuscated to the same name
    (`lowercase_`), which made them shadow each other; restored to the
    names the ONNX export machinery reads.
    """

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input name -> dynamic-axis mapping for the exported graph."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version to export with."""
        return 12
| 31
| 0
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase:
    """Builds tiny BioGpt configs/inputs and runs per-architecture checks.

    The obfuscation gave every parameter the same name (`a_`, a SyntaxError),
    collapsed every local to `UpperCamelCase__`, and renamed every method to
    `lowercase_` (each definition shadowing the last). Names are restored to
    the ones the test class below calls.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small BioGptConfig sized from this tester's attributes."""
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model (with and without mask) and check the output shape."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Forward the causal-LM head with labels and check the logits shape."""
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check cached (past_key_values) decoding matches a full forward pass."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask with the second half masked out
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check cached decoding over a multi-token continuation matches the full pass."""
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        """Run a forward pass with labels and backprop the loss (optionally checkpointed)."""
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        """Check the scaled-init std of output projections against the config."""
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Forward the token-classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API test suite for the BioGpt model family.

    Bases and attribute names are restored from the imports at the top of the
    file: the obfuscated bases were not classes, and the class attributes all
    shared one name (`_lowerCamelCase`), shadowing each other.
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        # The tester class is referenced by its original name, as in the
        # surrounding call sites.
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for pe_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = pe_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        """Left-padded batched generation must match per-sentence generation."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class UpperCAmelCase(unittest.TestCase):
    """Slow integration tests that run the real `microsoft/biogpt` checkpoint."""

    @slow
    def test_inference_biogpt(self):
        """Check logits shape and a fixed 3x3 slice against recorded values."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        """Seeded beam-search generation must reproduce the recorded sentence."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict.

    Args:
        flax_model: target Flax model (provides shapes/params for matching).
        pytorch_checkpoint_path: path to a .bin file, or the list of shard files
            when `is_sharded` is True.
        is_sharded: whether the checkpoint is split across shard files.
        allow_missing_keys: kept for interface compatibility; unused here.

    Returns:
        The converted Flax state dict.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def SCREAMING_SNAKE_CASE__(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename one PyTorch weight key to its Flax equivalent, transposing the
    tensor where PT/Flax layouts differ (linear `.T`, conv NCHW->HWIO).

    Args:
        pt_tuple_key: the dotted PyTorch key split into a tuple.
        pt_tensor: the weight as a numpy array.
        random_flax_state_dict: flattened Flax params used to disambiguate
            renames (e.g. layer norm vs embedding, both end in "weight").
        model_prefix: the Flax model's base-model prefix.

    Returns:
        (renamed key tuple, possibly transposed tensor).
    """

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """True if `key` or `(model_prefix,) + key` exists in the Flax params."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT NCHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__(pt_state_dict, flax_model):
    """Convert an (unsharded) PyTorch state dict into a Flax params dict.

    Args:
        pt_state_dict: mapping of dotted PT keys to torch tensors.
        flax_model: target Flax model whose params guide key renaming.

    Returns:
        The nested (unflattened) Flax state dict.
    """
    # convert pytorch tensors to numpy so they can become jnp arrays
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(pt_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def SCREAMING_SNAKE_CASE__(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of shard files) to a Flax state dict.

    Args:
        shard_filenames: paths of the .bin shard files to load in turn.
        flax_model: target Flax model whose params guide key renaming.

    Returns:
        The nested (unflattened) Flax state dict accumulated across shards.
    """
    import torch

    # Accumulate converted weights across all shards.
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )

            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(pt_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def SCREAMING_SNAKE_CASE__(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint into a PyTorch model.

    Args:
        model: target PyTorch model; its class name selects the matching
            `Flax...` class from the `transformers` namespace.
        flax_checkpoint_path: path to the msgpack-serialized Flax weights.

    Returns:
        The PyTorch model with Flax weights loaded.

    Raises:
        EnvironmentError: if the file cannot be deserialized as Flax weights.
    """
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
    """Copy weights from a Flax state dict into a PyTorch model and return the model.

    NOTE(review): the signature reuses `_UpperCamelCase` for both parameters (a
    SyntaxError) and the body refers to `flax_state` / `pt_model`, which the
    obfuscated parameter names no longer define.  The intended parameters are
    presumably `(pt_model, flax_state)` -- confirm against the original source.
    Code kept byte-identical; only comments were added.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    # NOTE(review): the lambda's parameter was obfuscated but the lambda body
    # uses `x`; `jnp.bfloataa` / `np.floataa` look like corrupted bfloat16/float32.
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
    if any(_UpperCamelCase ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    UpperCamelCase__ = pt_model.state_dict()
    # Detect the base-model/head mismatch between the two checkpoints so keys
    # can be re-prefixed appropriately below.
    UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    UpperCamelCase__ = []
    UpperCamelCase__ = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # conv layer
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # linear layer
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            UpperCamelCase__ = ".".join(_UpperCamelCase )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCamelCase__ = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCamelCase__ = key.split("." )
            UpperCamelCase__ = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCamelCase__ = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCamelCase__ = key_components[-2] + "_v"
            if name is not None:
                UpperCamelCase__ = key_components[:-3] + [name]
                UpperCamelCase__ = ".".join(_UpperCamelCase )
                UpperCamelCase__ = key
        if flax_key in special_pt_names:
            UpperCamelCase__ = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
                UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
                # remove from missing keys
                missing_keys.remove(_UpperCamelCase )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(_UpperCamelCase )
    pt_model.load_state_dict(_UpperCamelCase )
    # re-transform missing_keys to list
    UpperCamelCase__ = list(_UpperCamelCase )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference." )
    else:
        logger.warning(
            F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            "If your task is similar to the task the model of the checkpoint was trained on, "
            F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 31
| 0
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): the obfuscated file bound these constants to `__lowercase`,
# but the spec call below (and later functions) reference `DIFFUSERS_PATH`;
# the conventional names are restored here.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# Make sure the `diffusers` module imported below is the one in this repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def SCREAMING_SNAKE_CASE__( line : str , indent : str ) -> bool:
    """Return True while `line` still belongs to the current definition body.

    A line continues the body when it keeps the body's indentation, is blank
    (length <= 1, i.e. at most a newline), or is a closing-paren/return-type
    line such as `) -> Foo:`.
    """
    # NOTE(review): the obfuscated signature reused one name for both
    # parameters (a SyntaxError) while the body referenced `line`; restored.
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , line ) is not None
def SCREAMING_SNAKE_CASE__( object_name : str ) -> str:
    """Return the source of `object_name` (e.g. ``models.attention.Attention``)
    found under ``DIFFUSERS_PATH``.

    Walks the dotted path to locate the module file, then scans for each nested
    ``class``/``def`` in turn and returns the lines of the final definition.

    Raises:
        ValueError: if no module file or no matching definition is found.
    """
    # NOTE(review): reconstructed -- the obfuscated body referenced
    # `object_name`, `module`, `lines`, `start_index`, ... that were never bound.
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
__lowercase: Tuple = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
__lowercase: Any = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
__lowercase: List[str] = re.compile(r"<FILL\s+[^>]*>")
def SCREAMING_SNAKE_CASE__( code : str ) -> str:
    """Return the leading whitespace of the first non-empty line of `code` ('' if none)."""
    # NOTE(review): the obfuscated body referenced `code`, `lines` and `idx`
    # which were never bound; restored.
    lines = code.split("\n" )
    idx = 0
    # Skip leading empty lines.
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
    return ""
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> List[Any]:
    """Reformat a code snippet with black (plus doc-builder docstring styling).

    NOTE(review): `black.Mode(..., preview=_UpperCamelCase)` passes the input
    string as the `preview` flag and `black.TargetVersion.PYaa` looks like a
    corrupted target-version constant -- confirm against the original script.
    Code kept byte-identical; only comments were added.
    """
    # Indented snippets are wrapped in a dummy class so black accepts them,
    # and the wrapper is stripped again before returning.
    UpperCamelCase__ = len(get_indent(_UpperCamelCase ) ) > 0
    if has_indent:
        UpperCamelCase__ = F'class Bla:\n{code}'
    UpperCamelCase__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=_UpperCamelCase )
    UpperCamelCase__ = black.format_str(_UpperCamelCase , mode=_UpperCamelCase )
    UpperCamelCase__ , UpperCamelCase__ = style_docstrings_in_code(_UpperCamelCase )
    return result[len("class Bla:\n" ) :] if has_indent else result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : int=False ) -> Any:
    """Check (and optionally fix in place) every `# Copied from` section of one file.

    Returns a list of `[object_name, line_number]` entries for inconsistent copies.

    NOTE(review): the signature reuses `_UpperCamelCase` for both parameters (a
    SyntaxError) and the body refers to names the obfuscation destroyed
    (`search`, `object_name`, `replace_pattern`, `observed_code`, `overwrite`,
    ...).  Intended parameters are presumably `(filename, overwrite=False)` --
    confirm against the original script.  Code kept byte-identical; only
    comments were added.
    """
    with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
        UpperCamelCase__ = f.readlines()
    UpperCamelCase__ = []
    UpperCamelCase__ = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(_UpperCamelCase ):
        UpperCamelCase__ = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = search.groups()
        UpperCamelCase__ = find_code_in_diffusers(_UpperCamelCase )
        UpperCamelCase__ = get_indent(_UpperCamelCase )
        UpperCamelCase__ = line_index + 1 if indent == theoretical_indent else line_index + 2
        UpperCamelCase__ = theoretical_indent
        UpperCamelCase__ = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        UpperCamelCase__ = True
        while line_index < len(_UpperCamelCase ) and should_continue:
            line_index += 1
            if line_index >= len(_UpperCamelCase ):
                break
            UpperCamelCase__ = lines[line_index]
            UpperCamelCase__ = _should_continue(_UpperCamelCase , _UpperCamelCase ) and re.search(F'^{indent}# End copy' , _UpperCamelCase ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        UpperCamelCase__ = lines[start_index:line_index]
        UpperCamelCase__ = "".join(_UpperCamelCase )
        # Remove any nested `Copied from` comments to avoid circular copies
        UpperCamelCase__ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(_UpperCamelCase ) is None]
        UpperCamelCase__ = "\n".join(_UpperCamelCase )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(_UpperCamelCase ) > 0:
            UpperCamelCase__ = replace_pattern.replace("with" , "" ).split("," )
            UpperCamelCase__ = [_re_replace_pattern.search(_UpperCamelCase ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = pattern.groups()
                UpperCamelCase__ = re.sub(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                if option.strip() == "all-casing":
                    UpperCamelCase__ = re.sub(obja.lower() , obja.lower() , _UpperCamelCase )
                    UpperCamelCase__ = re.sub(obja.upper() , obja.upper() , _UpperCamelCase )
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        UpperCamelCase__ = blackify(lines[start_index - 1] + theoretical_code )
        UpperCamelCase__ = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                UpperCamelCase__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
                UpperCamelCase__ = start_index + 1
    if overwrite and len(_UpperCamelCase ) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting (unknown).' )
        with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
            f.writelines(_UpperCamelCase )
    return diffs
def SCREAMING_SNAKE_CASE__( overwrite : bool = False ) -> None:
    """Run the `# Copied from` consistency check on every python file under DIFFUSERS_PATH.

    Collects one message per inconsistent copy and raises with the full list,
    unless `overwrite` is True, in which case files are fixed in place instead.
    """
    # NOTE(review): the obfuscated body passed one `_UpperCamelCase` as both the
    # glob root and the `recursive` flag, and the diff message lost its file
    # name; restored to DIFFUSERS_PATH / True / {filename}.
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    # CLI entry point: `python utils/check_copies.py [--fix_and_overwrite]`.
    # NOTE(review): the parser and parsed args are bound to `__lowercase` but
    # referenced as `parser` / `args` below -- obfuscation artifact; confirm.
    __lowercase: Any = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    __lowercase: List[Any] = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( credit_card_number : str ) -> bool:
    """Return True if the number starts with a known issuer prefix (34/35/37/4/5/6)."""
    # NOTE(review): the obfuscated parameter name did not match the body's
    # `credit_card_number`; restored.
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( credit_card_number : str ) -> bool:
    """Return True if `credit_card_number` passes the Luhn checksum."""
    # NOTE(review): the obfuscated body referenced `cc_number`, `total` and
    # `digit`, which were never bound; restored.
    cc_number = credit_card_number
    total = 0
    double_digit_start = len(cc_number ) - 2
    for i in range(double_digit_start , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def SCREAMING_SNAKE_CASE__( credit_card_number : str ) -> bool:
    """Validate a credit card number (digits only, length 13-16, issuer prefix,
    Luhn checksum), printing an explanation and returning the verdict."""
    # NOTE(review): the obfuscated parameter name did not match the body's
    # `credit_card_number`; restored.
    error_message = F'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(F'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'{error_message} it fails the Luhn check.' )
        return False
    print(F'{credit_card_number} is a valid credit card number.' )
    return True
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: one valid and one invalid (too short) number.
    # NOTE(review): `validate_credit_card_number` is named
    # `SCREAMING_SNAKE_CASE__` above in this obfuscated file -- confirm.
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# NOTE(review): reconstructed lazy-module boilerplate -- the obfuscated file
# bound the structure dict to `__lowercase` while `_LazyModule` was called with
# an undefined `_import_structure`, and the `sys.modules` assignment was lost.

# Always-importable submodules: configuration and processor.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

# Modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
    """Solve the semiconductor mass-action law n * p = n_i**2 for the quantity given as 0.

    Exactly one of the three arguments must be 0; that quantity is computed from
    the other two and returned as a ("name", value) tuple.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    # NOTE(review): the obfuscated signature reused `_UpperCamelCase` for all
    # three parameters (a SyntaxError); restored to the names the body used.
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError("You cannot supply more or less than 2 values" )
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor" )
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor" )
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor" )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the pretrained-config archive map.
# NOTE(review): both are bound to `__lowercase` by the obfuscation although the
# config class convention expects `logger` and a named ARCHIVE_MAP -- confirm.
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( PretrainedConfig):
    """Configuration for GPT-NeoX-Japanese models.

    NOTE(review): reconstructed -- the obfuscated `__init__` reused `a_` for
    every parameter (a SyntaxError) and assigned bare names instead of `self.*`
    attributes.  Parameter names/defaults are matched positionally to the
    attribute assignments; the base class is taken from the file's
    `PretrainedConfig` import.  Confirm against the upstream config class.
    """
    # Upstream binds the model type as `model_type`; the obfuscated file used
    # `_lowerCamelCase`.
    model_type = 'gpt_neox_japanese'

    def __init__( self, vocab_size=3_2000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=1_0000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=3_1996, eos_token_id=3_1999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        """Store the hyper-parameters and forward token ids to the base config."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# NOTE(review): reconstructed lazy-module boilerplate -- the obfuscated file
# bound the structure dict to `__lowercase` while `_LazyModule` was called with
# an undefined `_import_structure`, and the `sys.modules` assignment was lost.

# Always-importable submodule: configuration.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# Modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__( config , base_model=False ):
    """Build (old_name, new_name) pairs mapping ViT-MSN checkpoint keys to HF ViT keys.

    When `base_model` is True the classification head is skipped and the
    leading "vit." is stripped from every target key.
    """
    # NOTE(review): the obfuscated signature reused one name for both
    # parameters (a SyntaxError) and `rename_keys` was never bound; restored.
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def SCREAMING_SNAKE_CASE__( state_dict , config , base_model=False ):
    """Split each fused qkv projection in `state_dict` into separate query/key/value entries.

    NOTE(review): reconstructed -- the obfuscated version discarded the target
    key names; the HF ViT key layout used below is the upstream convention for
    this conversion script -- confirm.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__( state_dict ):
    """Drop the classification head weights from `state_dict` (no-op if absent)."""
    # NOTE(review): the obfuscated version popped the parameter itself;
    # restored to pop each ignore key with a None default.
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def SCREAMING_SNAKE_CASE__( state_dict ):
    """Drop the MSN projection-head (module.fc.*) weights from `state_dict` (no-op if absent)."""
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def SCREAMING_SNAKE_CASE__( dct , old , new ):
    """Move `dct[old]` to `dct[new]` (raises KeyError if `old` is missing)."""
    # NOTE(review): the obfuscated signature reused one name for all three
    # parameters (a SyntaxError) and dropped the write-back; restored.
    val = dct.pop(old )
    dct[new] = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ) -> Union[str, Any]:
    """Download a ViT-MSN checkpoint, convert it to HF format, verify and save it.

    NOTE(review): the signature reuses `_UpperCamelCase` (a SyntaxError) and the
    body references names the obfuscation destroyed (`checkpoint_url`, `config`,
    `model`, `image_processor`, `state_dict`, `idalabel`, ...).  Intended
    parameters are presumably `(checkpoint_url, pytorch_dump_folder_path)` --
    confirm against the upstream conversion script.  Performs network and
    filesystem I/O.  Code kept byte-identical; only comments were added.
    """
    UpperCamelCase__ = ViTMSNConfig()
    UpperCamelCase__ = 10_00
    # Label mapping is pulled from the hub label-files dataset.
    UpperCamelCase__ = "datasets/huggingface/label-files"
    UpperCamelCase__ = "imagenet-1k-id2label.json"
    UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase ) , "r" ) )
    UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
    UpperCamelCase__ = idalabel
    UpperCamelCase__ = {v: k for k, v in idalabel.items()}
    # Architecture hyper-parameters are inferred from the checkpoint URL.
    if "s16" in checkpoint_url:
        UpperCamelCase__ = 3_84
        UpperCamelCase__ = 15_36
        UpperCamelCase__ = 6
    elif "l16" in checkpoint_url:
        UpperCamelCase__ = 10_24
        UpperCamelCase__ = 40_96
        UpperCamelCase__ = 24
        UpperCamelCase__ = 16
        UpperCamelCase__ = 0.1
    elif "b4" in checkpoint_url:
        UpperCamelCase__ = 4
    elif "l7" in checkpoint_url:
        UpperCamelCase__ = 7
        UpperCamelCase__ = 10_24
        UpperCamelCase__ = 40_96
        UpperCamelCase__ = 24
        UpperCamelCase__ = 16
        UpperCamelCase__ = 0.1
    UpperCamelCase__ = ViTMSNModel(_UpperCamelCase )
    # Only the target encoder of the MSN checkpoint is converted.
    UpperCamelCase__ = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="cpu" )["target_encoder"]
    UpperCamelCase__ = ViTImageProcessor(size=config.image_size )
    remove_projection_head(_UpperCamelCase )
    UpperCamelCase__ = create_rename_keys(_UpperCamelCase , base_model=_UpperCamelCase )
    for src, dest in rename_keys:
        rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , base_model=_UpperCamelCase )
    model.load_state_dict(_UpperCamelCase )
    model.eval()
    # Sanity-check the converted weights on a reference COCO image.
    UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    UpperCamelCase__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
    UpperCamelCase__ = ViTImageProcessor(
        size=config.image_size , image_mean=_UpperCamelCase , image_std=_UpperCamelCase )
    UpperCamelCase__ = image_processor(images=_UpperCamelCase , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    UpperCamelCase__ = model(**_UpperCamelCase )
    UpperCamelCase__ = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        UpperCamelCase__ = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
    elif "b16" in checkpoint_url:
        UpperCamelCase__ = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
    elif "l16" in checkpoint_url:
        UpperCamelCase__ = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
    elif "b4" in checkpoint_url:
        UpperCamelCase__ = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
    else:
        UpperCamelCase__ = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , _UpperCamelCase , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(_UpperCamelCase )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # CLI: --checkpoint_url (source MSN checkpoint) and --pytorch_dump_folder_path (output dir).
    # NOTE(review): the parser and parsed args are bound to `__lowercase` but
    # referenced as `parser` / `args` below -- obfuscation artifact; confirm.
    __lowercase: int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    __lowercase: Any = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( v , l , r , key ) -> int:  # noqa: E741
    """Binary search: smallest index in (l, r] with v[index] >= key.

    Assumes v is sorted on (l, r] and v[l] < key <= v[r] on entry (the tail
    array of the longest-increasing-subsequence algorithm below).
    """
    # NOTE(review): the obfuscated signature reused one name for all four
    # parameters (a SyntaxError) while the body used `v`, `l`, `r`, `key`.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def SCREAMING_SNAKE_CASE__( v : list[int] ) -> int:
    """Length of the longest strictly increasing subsequence of `v` (O(n log n) tails method)."""
    # NOTE(review): reconstructed -- the obfuscated body referenced `tail`,
    # which was never bound, and discarded every tail update.
    if len(v ) == 0:
        return 0
    # tail[j] = smallest possible tail value of an increasing subsequence of length j + 1
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # new smallest value: it starts a better length-1 subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the ceiling of v[i] in tail (binary search over (-1, length - 1])
            lo, hi = -1, length - 1
            while hi - lo > 1:
                mid = (lo + hi) // 2
                if tail[mid] >= v[i]:
                    hi = mid
                else:
                    lo = mid
            tail[hi] = v[i]
    return length
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( number : float , digit_amount : int ) -> float:
    """Return the fractional part of `number`, rounded to `digit_amount` places when > 0.

    `int()` truncates toward zero, so for negative inputs the result keeps the
    sign of the fraction (e.g. -14.789 -> -0.789).
    """
    # NOTE(review): the obfuscated signature reused one name for both
    # parameters (a SyntaxError); restored to the names the body used.
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # Demo output for positive, negative and zero inputs.
    # NOTE(review): `decimal_isolate` is named `SCREAMING_SNAKE_CASE__` above
    # in this obfuscated file -- confirm.
    print(decimal_isolate(1.5_3, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/scipy are not installed.

    NOTE(review): the obfuscated metaclass pointed at an unrelated name; the
    file's `DummyObject` import is the intended metaclass.  The two
    identically-named classmethods mirror the obfuscated original, where the
    second silently shadows the first (upstream gives them distinct names such
    as `from_config` / `from_pretrained`) -- confirm.
    """
    # Backends required before any real use of this object.
    _backends = ['torch', 'scipy']

    def __init__( self, *a_, **kwargs ):
        requires_backends(self, ["torch", "scipy"] )

    @classmethod
    def lowercase_ ( cls, *a_, **kwargs ):
        requires_backends(cls, ["torch", "scipy"] )

    @classmethod
    def lowercase_ ( cls, *a_, **kwargs ):
        requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds ``DPTConfig`` objects and dummy inputs for the DPT tests below.

    NOTE(review): the original block was machine-mangled — ``__init__``
    declared every parameter as ``a_`` (duplicate parameter names are a
    SyntaxError) and all five methods shared the name ``lowercase_``.
    Parameter, class and method names are reconstructed from the right-hand
    sides of the original assignments and from the call sites in the test
    class below (``DPTModelTester(self)``, ``prepare_config_and_inputs`` …).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],  # noqa: B006 — read-only default
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],  # noqa: B006 — read-only default
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels only when ``use_labels``."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small hybrid-backbone DPT configuration.

        NOTE(review): ``DPTConfig`` is not imported in the visible header of
        this file — confirm the import exists elsewhere in the module.
        """
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # presumably False — the mangled source read `is_decoder=a_`; confirm
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Bare DPTModel: last_hidden_state is (batch, seq_length, hidden_size)."""
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        """Depth head: predicted_depth is (batch, image_size, image_size)."""
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Segmentation head: logits are (batch, num_labels, image_size, image_size)."""
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Common-suite tests for the DPT models (bare model, depth estimation, semantic segmentation).

    NOTE(review): this block is machine-mangled — every test method is named
    ``lowercase_`` (later definitions shadow earlier ones), ``a_`` stands in
    for undefined locals/constants, and the mixin base names are unresolved
    in this file.  The comments below describe apparent intent only; confirm
    against the un-mangled `tests/models/dpt` file before trusting behavior.
    """
    _lowerCamelCase : List[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    _lowerCamelCase : Tuple = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Common-test switches (presumably test_pruning / test_resize_embeddings /
    # test_head_masking — confirm against the original attribute names).
    _lowerCamelCase : Optional[int] = False
    _lowerCamelCase : Optional[int] = False
    _lowerCamelCase : List[str] = False
    def lowercase_ ( self : List[str] ):
        """setUp: create the model tester and the config tester."""
        UpperCamelCase__ = DPTModelTester(self )
        UpperCamelCase__ = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
    def lowercase_ ( self : int ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DPT does not use inputs_embeds" )
    def lowercase_ ( self : int ):
        """Skipped: DPT has no inputs_embeds path."""
        pass
    def lowercase_ ( self : Union[str, Any] ):
        """Input embeddings are an nn.Module; output embeddings are absent or Linear."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ = model_class(a_ )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            UpperCamelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a_, nn.Linear ) )
    def lowercase_ ( self : int ):
        """forward() signature starts with `pixel_values` for every model class."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ = model_class(a_ )
            UpperCamelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase__ = [*signature.parameters.keys()]
            UpperCamelCase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1], a_ )
    def lowercase_ ( self : Dict ):
        """Shape-check the bare DPTModel."""
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )
    def lowercase_ ( self : Optional[int] ):
        """Shape-check DPTForDepthEstimation."""
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*a_ )
    def lowercase_ ( self : str ):
        """Shape-check DPTForSemanticSegmentation."""
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
    def lowercase_ ( self : Union[str, Any] ):
        """Training smoke test: loss.backward() runs for the trainable classes."""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
            UpperCamelCase__ = True
            if model_class in get_values(a_ ):
                continue
            UpperCamelCase__ = model_class(a_ )
            model.to(a_ )
            model.train()
            UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
            UpperCamelCase__ = model(**a_ ).loss
            loss.backward()
    def lowercase_ ( self : Any ):
        """Training with gradient checkpointing enabled still backpropagates."""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
            UpperCamelCase__ = False
            UpperCamelCase__ = True
            if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
                continue
            UpperCamelCase__ = model_class(a_ )
            model.to(a_ )
            model.gradient_checkpointing_enable()
            model.train()
            UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
            UpperCamelCase__ = model(**a_ ).loss
            loss.backward()
    def lowercase_ ( self : int ):
        """Under a zero-init config, trainable params (outside the hybrid backbone) round to 0.0 or 1.0."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase__ = _config_zero_init(a_ )
        for model_class in self.all_model_classes:
            UpperCamelCase__ = model_class(config=a_ )
            # Skip the check for the backbone
            UpperCamelCase__ = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    UpperCamelCase__ = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowercase_ ( self : Any ):
        """Skipped placeholder."""
        pass
    @slow
    def lowercase_ ( self : List[str] ):
        """All hub checkpoints (except the first) can be loaded."""
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            UpperCamelCase__ = DPTModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
    def lowercase_ ( self : Any ):
        """Constructing a depth model with an invalid setting ("add" — presumably
        `readout_type`; confirm) must raise."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase__ = "add"
        with self.assertRaises(a_ ):
            UpperCamelCase__ = DPTForDepthEstimation(a_ )
def prepare_img() -> "Image.Image":
    """Load the COCO fixture image used by the DPT integration test.

    Bug fixed: the mangled original assigned the opened image to an unused
    name and then returned the unbound name ``image``; the function is also
    renamed to match its call site in the integration test below.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase):
    """Slow integration test: Intel/dpt-hybrid-midas depth estimation on a COCO image.

    NOTE(review): mangled block — locals are assigned to ``UpperCamelCase__``
    while later lines read ``model``, ``image_processor``, ``outputs`` and
    ``predicted_depth``, and ``a_`` stands in for ``torch_device`` / inputs.
    Comments describe apparent intent only.
    """
    def lowercase_ ( self : List[str] ):
        """Forward the fixture image through DPT-hybrid MiDaS; check depth shape and a 3x3x3 slice."""
        UpperCamelCase__ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        UpperCamelCase__ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
        UpperCamelCase__ = prepare_img()
        UpperCamelCase__ = image_processor(images=a_, return_tensors="pt" ).to(a_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase__ = model(**a_ )
        UpperCamelCase__ = outputs.predicted_depth
        # verify the predicted depth
        UpperCamelCase__ = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape, a_ )
        UpperCamelCase__ = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, a_, atol=1e-4 ) )
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Configuration for GPT-NeoX-Japanese (``model_type='gpt_neox_japanese'``).

    NOTE(review): the base class name is mangled; it presumably stands for
    ``PretrainedConfig`` (imported at the top of this file) — confirm.
    Bug fixed: the original ``__init__`` declared every parameter as ``a_``
    (duplicate parameter names are a SyntaxError); parameter names below are
    reconstructed from the right-hand sides of the original attribute
    assignments, with the original defaults preserved in order.
    """
    _lowerCamelCase : Any = 'gpt_neox_japanese'

    def __init__(
        self,
        vocab_size=3_2000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=1_0000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=3_1996,
        eos_token_id=3_1999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 31
| 0
|
'''simple docstring'''
def floyd(_UpperCamelCase: int) -> None:
    """Print the upper half of a star triangle: row ``i`` has ``i + 1`` stars,
    left-padded so the triangle leans right.

    Bugs fixed: the space loop referenced an undefined name ``n`` instead of
    the parameter; the function is renamed ``floyd`` to match its call site
    in ``pretty_print`` below.
    """
    for i in range(0, _UpperCamelCase):
        for _ in range(0, _UpperCamelCase - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(_UpperCamelCase: int) -> None:
    """Print the lower (inverted) half of the star triangle: row ``i`` counts
    down from the full width to one star, with growing left padding.

    Bugs fixed: the star loop iterated over the full width (the parameter)
    instead of the current row ``i``, so every row printed the same number of
    stars; the space loop referenced an undefined name ``n``.  Renamed
    ``reverse_floyd`` to match its call site in ``pretty_print`` below.
    """
    for i in range(_UpperCamelCase, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(_UpperCamelCase - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(_UpperCamelCase: int) -> None:
    """Print the full diamond: the upper triangle followed by the inverted one.

    Bugs fixed: the guard referenced an undefined name ``n`` instead of the
    parameter; the function is renamed ``pretty_print`` to match its call
    site in the ``__main__`` block below.
    NOTE(review): ``floyd`` / ``reverse_floyd`` are not defined under those
    names in this mangled file (the two helpers above were renamed by the
    obfuscation) — confirm the helper names resolve.
    """
    if _UpperCamelCase <= 0:
        print("        ...       ....      nothing printing :(" )
        return
    floyd(_UpperCamelCase )  # upper half
    reverse_floyd(_UpperCamelCase )  # lower half
if __name__ == "__main__":
    # ASCII-art banner spelling "FLOYD".
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    # NOTE(review): mangled script — the loop reads `K`, `pretty_print` and
    # `user_number`, none of which are bound under those names here (both
    # inputs are assigned to `__lowercase`).  Confirm against the original.
    __lowercase: Dict = 1
    while K:
        __lowercase: List[Any] = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        __lowercase: Optional[int] = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid via Newton-Laplace: c = sqrt(K / rho).

    :param density: fluid density (must be > 0)
    :param bulk_modulus: bulk modulus K (must be > 0)
    :raises ValueError: if either argument is non-positive
    Bug fixed: both parameters were declared with the same mangled name
    (duplicate parameter names are a SyntaxError); names are restored from
    the function body.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Execute the module doctests when run as a script.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
# Standard timestamped log format for this script.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# NOTE(review): mangled name — the rest of the script reads `logger`, which
# is never bound; this assignment was presumably `logger = ...`.  Confirm.
__lowercase: Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
    # Count token occurrences in a binarized MLM dataset to smooth the
    # masking probabilities (cf. XLM / word2vec).
    # NOTE(review): mangled script — results are assigned to `__lowercase`
    # while later statements read `parser`, `args`, `data`, `counter` and
    # `counts`, none of which are ever bound here.  Confirm against the
    # original distillation script before running.
    __lowercase: Optional[Any] = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30_522, type=int)
    __lowercase: Optional[Any] = parser.parse_args()
    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, "rb") as fp:
        __lowercase: List[str] = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    __lowercase: List[str] = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # Dense per-token-id count vector.
    __lowercase: List[Any] = [0] * args.vocab_size
    for k, v in counter.items():
        __lowercase: List[str] = v
    logger.info(F"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 352
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p + ...

    Each term is formatted as a string; an empty ``nth_term`` yields ``[""]``.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']

    Bugs fixed: both parameters were declared with the same mangled name
    (duplicate parameter names are a SyntaxError), and the loop appended to
    an unbound name ``series``.  Renamed ``p_series`` to match the call site
    in the ``__main__`` block below.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f'1 / {pow(temp + 1, int(power))}' if series else "1")
    return series
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive driver for the P-series generator.
    __lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
    __lowercase: Optional[int] = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    # NOTE(review): mangled script — `nth_term` and `power` are never bound
    # under those names here (both inputs are assigned to `__lowercase`).
    # Confirm against the original script.
    print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> str:
UpperCamelCase__ = []
for line in lines:
UpperCamelCase__ = re.sub(r"#.*" , "" , _UpperCamelCase ) # remove comments
if line:
filtered_lines.append(_UpperCamelCase )
UpperCamelCase__ = "\n".join(_UpperCamelCase )
# Make a hash from all this code
UpperCamelCase__ = full_str.encode("utf-8" )
return shaaaa(_UpperCamelCase ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): mangled module — the dicts below are assigned to `__lowercase`
# while the later `.update(...)` calls and loops read `_EXTENSION_TO_MODULE`
# and `_MODULE_TO_EXTENSIONS`, which are never bound under those names here.
# Confirm against the original `datasets` packaged-modules source.
__lowercase: Optional[Any] = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__lowercase: Optional[int] = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
# Image/audio folder loaders claim every extension they support (upper- and
# lower-case variants), plus ZIP archives below.
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowercase: List[Any] = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
__lowercase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Task template for audio classification (audio column -> class label).

    NOTE(review): the base class name is mangled; it presumably stands for
    ``TaskTemplate`` (imported above) — confirm.  Bugs fixed: all five class
    attributes were named ``_lowerCamelCase`` (each shadowing the previous),
    while the method below reads ``label_column`` / ``label_schema`` /
    ``audio_column``; the method locals were never bound; and the instance
    method and the property shared the name ``lowercase_`` (the property
    shadowed the method).  Names restored from the attribute usage and the
    upstream `datasets` task templates.
    """
    # `task` stays in asdict() output even when it equals the default.
    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the concrete
        ClassLabel found in ``features``.

        :raises ValueError: if the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ rather than setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Dataset-column -> template-column mapping."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase: str = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tokenizer common-suite tests for Pegasus (slow + fast tokenizers).

    NOTE(review): mangled block — every test method is named ``lowercase_``
    (later definitions shadow earlier ones) and ``a_`` stands in for locals
    and constants (e.g. the sentencepiece fixture path in ``setUp``).
    Comments describe apparent intent only.
    """
    _lowerCamelCase : Optional[int] = PegasusTokenizer
    _lowerCamelCase : Optional[Any] = PegasusTokenizerFast
    _lowerCamelCase : int = True
    _lowerCamelCase : List[Any] = True
    def lowercase_ ( self : List[str] ):
        """setUp: build a tokenizer from the sentencepiece fixture and save it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCamelCase__ = PegasusTokenizer(a_ )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def lowercase_ ( self : Union[str, Any] ):
        """The reference google/pegasus-large tokenizer."""
        return PegasusTokenizer.from_pretrained("google/pegasus-large" )
    def lowercase_ ( self : str, **a_ : Any ):
        """Reload the tokenizer saved in setUp."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **a_ )
    def lowercase_ ( self : List[str], a_ : List[Any] ):
        """Input/output pair used by the common tests."""
        return ("This is a test", "This is a test")
    def lowercase_ ( self : Union[str, Any] ):
        """</s> maps to id 1 in both directions."""
        UpperCamelCase__ = "</s>"
        UpperCamelCase__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
    def lowercase_ ( self : Tuple ):
        """Vocab layout: <pad>, </s> first; 'v' last; size 1103."""
        UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], "<pad>" )
        self.assertEqual(vocab_keys[1], "</s>" )
        self.assertEqual(vocab_keys[-1], "v" )
        self.assertEqual(len(a_ ), 1103 )
    def lowercase_ ( self : str ):
        """vocab_size matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size, 1103 )
    def lowercase_ ( self : Optional[int] ):
        """Slow and fast tokenizers agree on special-token-heavy input."""
        UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        UpperCamelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
        UpperCamelCase__ = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        UpperCamelCase__ = rust_tokenizer([raw_input_str], return_tensors=a_, add_special_tokens=a_ ).input_ids[0]
        UpperCamelCase__ = py_tokenizer([raw_input_str], return_tensors=a_, add_special_tokens=a_ ).input_ids[0]
        self.assertListEqual(a_, a_ )
    def lowercase_ ( self : Union[str, Any] ):
        """Known-good ids for a sentence containing both mask tokens."""
        UpperCamelCase__ = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        UpperCamelCase__ = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        UpperCamelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        UpperCamelCase__ = tokenizer([raw_input_str], return_tensors=a_ ).input_ids[0]
        self.assertListEqual(a_, a_ )
    def lowercase_ ( self : Dict ):
        """Sanity checks on the large tokenizer's vocab/special-token layout."""
        UpperCamelCase__ = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        UpperCamelCase__ = "To ensure a smooth flow of bank resolutions."
        UpperCamelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        UpperCamelCase__ = tokenizer([raw_input_str], return_tensors=a_ ).input_ids[0]
        self.assertListEqual(a_, a_ )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def lowercase_ ( self : Union[str, Any] ):
        """Batched encoding pads/truncates to model_max_length (1024)."""
        UpperCamelCase__ = ["This is going to be way too long." * 150, "short example"]
        UpperCamelCase__ = ["not super long but more than 5 tokens", "tiny"]
        UpperCamelCase__ = self._large_tokenizer(a_, padding=a_, truncation=a_, return_tensors="pt" )
        UpperCamelCase__ = self._large_tokenizer(
            text_target=a_, max_length=5, padding=a_, truncation=a_, return_tensors="pt" )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a_ ) == 2 # input_ids, attention_mask.
    @slow
    def lowercase_ ( self : Optional[Any] ):
        """Full integration check against a pinned bigbird-pegasus revision."""
        UpperCamelCase__ = {"input_ids": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415", )
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tokenizer tests for the BigBird-Pegasus variant (offset=0, [MASK] token).

    NOTE(review): mangled like the class above — shadowed ``lowercase_``
    methods and undefined ``a_`` references (e.g. the fixture path in
    ``setUp``).  Comments describe apparent intent only.
    """
    _lowerCamelCase : Optional[int] = PegasusTokenizer
    _lowerCamelCase : List[str] = PegasusTokenizerFast
    _lowerCamelCase : List[Any] = True
    _lowerCamelCase : Union[str, Any] = True
    def lowercase_ ( self : Any ):
        """setUp: build a zero-offset tokenizer with [MASK] and save it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCamelCase__ = PegasusTokenizer(a_, offset=0, mask_token_sent=a_, mask_token="[MASK]" )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def lowercase_ ( self : List[Any] ):
        """The reference bigbird-pegasus-large-arxiv tokenizer."""
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
    def lowercase_ ( self : Optional[Any], **a_ : int ):
        """Reload the tokenizer saved in setUp."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **a_ )
    def lowercase_ ( self : Optional[int], a_ : str ):
        """Input/output pair used by the common tests."""
        return ("This is a test", "This is a test")
    def lowercase_ ( self : Optional[int] ):
        """Slow and fast tokenizers agree on special-token-heavy input."""
        UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        UpperCamelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname )
        UpperCamelCase__ = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        UpperCamelCase__ = rust_tokenizer([raw_input_str], return_tensors=a_, add_special_tokens=a_ ).input_ids[0]
        UpperCamelCase__ = py_tokenizer([raw_input_str], return_tensors=a_, add_special_tokens=a_ ).input_ids[0]
        self.assertListEqual(a_, a_ )
    @require_torch
    def lowercase_ ( self : int ):
        """Batched encoding pads/truncates to 4096 for this checkpoint."""
        UpperCamelCase__ = ["This is going to be way too long." * 1000, "short example"]
        UpperCamelCase__ = ["not super long but more than 5 tokens", "tiny"]
        UpperCamelCase__ = self._large_tokenizer(a_, padding=a_, truncation=a_, return_tensors="pt" )
        UpperCamelCase__ = self._large_tokenizer(
            text_target=a_, max_length=5, padding=a_, truncation=a_, return_tensors="pt" )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a_ ) == 2 # input_ids, attention_mask.
    def lowercase_ ( self : Union[str, Any] ):
        """Known-good ids for a fixed sentence with the arxiv checkpoint."""
        UpperCamelCase__ = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        UpperCamelCase__ = self._large_tokenizer(a_ ).input_ids
        self.assertListEqual(
            a_, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1], )
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Return a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    Bugs fixed: all four parameters were declared with the same mangled name
    (duplicate parameter names are a SyntaxError); names restored from the
    function body (``name`` is an unused trailing parameter kept for
    signature compatibility).  Renamed ``floats_list`` to match the call
    sites in the tester class below.
    NOTE(review): the ``rng is None`` default path reads ``global_rng``,
    which the mangled module never binds under that name — confirm.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    """Config + dummy-input factory for the Speech2Text feature-extractor tests.

    NOTE(review): reconstructed from a mangled block — ``__init__`` declared
    every parameter as ``a_`` (duplicate parameter names are a SyntaxError)
    and both helper methods were named ``lowercase_``.  The class name and
    ``prepare_feat_extract_dict`` are restored from the call sites in the
    test class below; the second helper follows the upstream
    ``prepare_inputs_for_common`` naming — confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=1_6000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch spans
        # [min_seq_length, max_seq_length).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Dummy speech inputs: equal-length or strictly increasing lengths.

        NOTE(review): ``floats_list`` is the generator defined at the top of
        this file (renamed by the obfuscation) — confirm the name resolves.
        """
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def lowercase_ ( self : Any ):
        """Same padding/normalization check as above, but with return_tensors="np".

        Additionally asserts that the padded tail of an example sums to ~0
        (i.e. padding frames are zeros, not garbage).
        """
        UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
        UpperCamelCase__ = [None, 16, None]
        for max_length, padding in zip(a_, a_ ):
            UpperCamelCase__ = feature_extractor(
                a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
            UpperCamelCase__ = inputs.input_features
            UpperCamelCase__ = inputs.attention_mask
            # valid frame count per example
            UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            # NOTE(review): index [0] below looks like a copy-paste of the line two
            # above; [1] may be intended (checking example 1's padded tail) — confirm.
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def lowercase_ ( self : str ):
        """'max_length' padding with truncation: long examples are cut to max_length frames.

        Examples 1 and 2 exceed max_length=4, so after truncation their full
        rows (not just a prefix) must be normalized.
        """
        UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = feature_extractor(
            a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
        UpperCamelCase__ = inputs.input_features
        UpperCamelCase__ = inputs.attention_mask
        # valid frame count per example, from the attention mask
        UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1] )
        self._check_zero_mean_unit_variance(input_features[2] )
    def lowercase_ ( self : Any ):
        """'longest' padding interacts correctly with max_length and truncation.

        First call: max_length=4 with truncation — every example is cut, so
        the batch shape is (3, 4, 24). Second call: max_length=16 exceeds the
        longest example (6 frames), so padding stops at the longest example
        and the batch shape is (3, 6, 24).
        """
        UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = feature_extractor(
            a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
        UpperCamelCase__ = inputs.input_features
        UpperCamelCase__ = inputs.attention_mask
        UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # max_length < longest -> every example is truncated/padded to max_length
        self.assertEqual(input_features.shape, (3, 4, 24) )
        UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = feature_extractor(
            a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
        UpperCamelCase__ = inputs.input_features
        UpperCamelCase__ = inputs.attention_mask
        UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # max_length > longest -> batch is padded only to the longest example (6 frames)
        self.assertEqual(input_features.shape, (3, 6, 24) )
    def lowercase_ ( self : Optional[Any] ):
        """Padding double-precision input must still produce single-precision features.

        Feeds the same data as a Python list and as a numpy array, then checks
        the dtype of the padded features for both np and pt return tensors.

        NOTE(review): ``np.floataa`` / ``torch.floataa`` are obfuscations of
        float32/float64 — confirm the intended dtypes upstream.
        """
        import torch
        UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
        UpperCamelCase__ = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def lowercase_ ( self : List[str], a_ : int ):
        """Load the first *a_* audio arrays from the dummy LibriSpeech dataset, sorted by id."""
        from datasets import load_dataset
        UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
        # automatic decoding with librispeech
        UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def lowercase_ ( self : int ):
        """Integration check: features for one real LibriSpeech sample match a reference slice.

        Compares the first 30 values of the first frame against hard-coded
        reference numbers and checks the overall output shape (1, 584, 24).
        """
        # fmt: off
        UpperCamelCase__ = np.array([
            -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
            -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
            -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
        ] )
        # fmt: on
        UpperCamelCase__ = self._load_datasamples(1 )
        UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEquals(input_features.shape, (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__lowercase: List[str] = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Feature extractor turning raw HTML into text nodes plus their XPaths.

    Walks the DOM with BeautifulSoup and, for every non-empty text node,
    records the node text together with the tag names and 1-based sibling
    subscripts needed to rebuild an XPath such as ``/html/body/div/li[2]``.
    """

    def __init__( self : Optional[int], **a_ : Tuple ):
        """Fail fast when the ``bs4`` backend is missing, then defer to the base class."""
        requires_backends(self, ["bs4"] )
        super().__init__(**a_ )

    def lowercase_ ( self : List[Any], a_ : Union[str, Any] ):
        """Return ``(xpath_tags, xpath_subscripts)`` for a DOM element.

        The subscript is 0 when a tag is the only same-named child of its
        parent, otherwise its 1-based position among same-named siblings.
        """
        UpperCamelCase__ = []
        UpperCamelCase__ = []
        # Text nodes have no .name: start from their enclosing tag instead.
        UpperCamelCase__ = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            UpperCamelCase__ = parent.find_all(child.name, recursive=a_ )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(a_ ) else next(i for i, s in enumerate(a_, 1 ) if s is child ) )
            UpperCamelCase__ = parent
        # Collected bottom-up; reverse to read root -> leaf.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def lowercase_ ( self : Dict, a_ : int ):
        """Parse an HTML string; return (doc_strings, xtag_seqs, xsub_seqs) of equal length."""
        UpperCamelCase__ = BeautifulSoup(a_, "html.parser" )
        UpperCamelCase__ = []
        UpperCamelCase__ = []
        UpperCamelCase__ = []
        for element in html_code.descendants:
            if type(a_ ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue
                # Unescape HTML entities and skip whitespace-only nodes.
                UpperCamelCase__ = html.unescape(a_ ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(a_ )
                UpperCamelCase__ , UpperCamelCase__ = self.xpath_soup(a_ )
                stringaxtag_seq.append(a_ )
                stringaxsubs_seq.append(a_ )
        if len(a_ ) != len(a_ ):
            raise ValueError("Number of doc strings and xtags does not correspond" )
        if len(a_ ) != len(a_ ):
            raise ValueError("Number of doc strings and xsubs does not correspond" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def lowercase_ ( self : List[Any], a_ : str, a_ : Tuple ):
        """Join tag names and non-zero subscripts into a single XPath string."""
        UpperCamelCase__ = ""
        for tagname, subs in zip(a_, a_ ):
            xpath += f'/{tagname}'
            if subs != 0:
                xpath += f'[{subs}]'
        return xpath

    def __call__( self : Tuple, a_ : List[str] ):
        """Extract nodes and XPaths from one HTML string or a batch of them.

        Returns a ``BatchFeature`` with keys ``nodes`` and ``xpaths``;
        raises ``ValueError`` for any other input type.
        """
        UpperCamelCase__ = False
        # Check that strings has a valid type
        if isinstance(a_, a_ ):
            UpperCamelCase__ = True
        elif isinstance(a_, (list, tuple) ):
            if len(a_ ) == 0 or isinstance(html_strings[0], a_ ):
                UpperCamelCase__ = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f'but is of type {type(a_ )}.' )
        UpperCamelCase__ = bool(isinstance(a_, (list, tuple) ) and (isinstance(html_strings[0], a_ )) )
        if not is_batched:
            # Wrap a single example so the loop below handles both cases uniformly.
            UpperCamelCase__ = [html_strings]
        # Get nodes + xpaths
        UpperCamelCase__ = []
        UpperCamelCase__ = []
        for html_string in html_strings:
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.get_three_from_single(a_ )
            nodes.append(a_ )
            UpperCamelCase__ = []
            for node, tag_list, sub_list in zip(a_, a_, a_ ):
                UpperCamelCase__ = self.construct_xpath(a_, a_ )
                xpath_strings.append(a_ )
            xpaths.append(a_ )
        # return as Dict
        UpperCamelCase__ = {"nodes": nodes, "xpaths": xpaths}
        UpperCamelCase__ = BatchFeature(data=a_, tensor_type=a_ )
        return encoded_inputs
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( input_array : np.ndarray ) -> np.ndarray:
    '''Reshape an array into a column vector of shape (size, 1).

    Fix: the original's obfuscated parameter name left ``input_array``
    referenced in the body as an undefined name (NameError at call time).
    '''
    return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''Within-class covariance matrix: scatter of samples around their class mean.

    Args:
        features: (n_features, n_samples) matrix, one sample per column.
        labels: length n_samples array of integer class ids in [0, classes).
        classes: number of distinct classes.

    Returns:
        (n_features, n_features) matrix, normalized by the total sample count.

    Fix: the original reused one obfuscated parameter name three times (a
    SyntaxError) and referenced ``features``/``labels`` as undefined names.
    '''
    covariance_sum = None
    for i in range(classes ):
        data = features[:, labels == i]
        # Centralize the data of class i (class mean as a column vector)
        data_mean = data.mean(1 )
        centered_data = data - data_mean.reshape((data_mean.size, 1) )
        if covariance_sum is None:
            # First class initializes the accumulator
            covariance_sum = np.dot(centered_data , centered_data.T )
        else:
            covariance_sum += np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''Between-class covariance matrix: scatter of class means around the global mean,
    weighted by class size.

    Args:
        features: (n_features, n_samples) matrix, one sample per column.
        labels: length n_samples array of integer class ids in [0, classes).
        classes: number of distinct classes.

    Returns:
        (n_features, n_features) matrix, normalized by the total sample count.

    Fix: the original reused one obfuscated parameter name three times (a
    SyntaxError) and referenced undefined names inside the body.
    '''
    general_data_mean = features.mean(1 )
    general_column = general_data_mean.reshape((general_data_mean.size, 1) )
    covariance_sum = None
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1 )
        mean_diff = data_mean.reshape((data_mean.size, 1) ) - general_column
        term = device_data * np.dot(mean_diff , mean_diff.T )
        covariance_sum = term if covariance_sum is None else covariance_sum + term
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( features : np.ndarray , dimensions : int ) -> np.ndarray:
    '''Principal Component Analysis: project *features* onto the top *dimensions*
    eigenvectors of the data covariance matrix.

    Args:
        features: (n_features, n_samples) matrix, one sample per column.
        dimensions: number of principal components to keep.

    Returns:
        (dimensions, n_samples) projected data.

    Raises:
        AssertionError: if the dataset is empty (all zeros).

    Fixes: the original's two parameters shared one obfuscated name (a
    SyntaxError), and ``force=`` in the error branch referenced an undefined
    name instead of ``True``.
    '''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # eigh returns eigenvalues ascending: take columns in reverse order (-1),
        # then keep only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def SCREAMING_SNAKE_CASE__( features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int ) -> np.ndarray:
    '''Linear Discriminant Analysis: project *features* onto *dimensions*
    discriminant directions from the generalized eigenproblem S_b v = l * S_w v.

    Args:
        features: (n_features, n_samples) matrix, one sample per column.
        labels: length n_samples array of integer class ids in [0, classes).
        classes: number of classes; must exceed *dimensions*.
        dimensions: number of discriminant directions to keep.

    Returns:
        (dimensions, n_samples) projected data.

    Raises:
        AssertionError: if classes <= dimensions, or if the dataset is empty.

    Fixes: the original's four parameters shared one obfuscated name (a
    SyntaxError), and ``if features.any:`` tested the truthiness of the bound
    method (always True) instead of calling it.
    '''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        # eigh returns eigenvalues ascending: reverse, keep top `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the kept directions via SVD
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
    '''LDA must raise AssertionError when dimensions >= classes.

    Fix: the original passed the undefined obfuscated name ``_UpperCamelCase``
    both to ``pytest.raises`` and as every call argument.
    '''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
    '''PCA on this rank-deficient input must not match the (deliberately wrong)
    expected output, so an AssertionError is raised and caught.

    Fix: the original passed the undefined obfuscated name ``_UpperCamelCase``
    to ``pytest.raises`` and to ``principal_component_analysis``.
    '''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        projected_data = principal_component_analysis(features , dimensions )
        if not np.allclose(projected_data , expected_output ):
            raise AssertionError
    assert error_info.type is AssertionError
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import math
from collections.abc import Callable
def SCREAMING_SNAKE_CASE__( function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    '''Find a root of *function* with the secant method, starting from x0 and x1.

    Iterates x_{n+2} = x_{n+1} - f(x_{n+1}) / ((f(x_{n+1}) - f(x_n)) / (x_{n+1} - x_n))
    until two successive iterates differ by less than 1e-5.

    Raises:
        ZeroDivisionError: if the two current points coincide or have equal
            function values (the secant line would be horizontal).

    Fixes: the original's three parameters shared one obfuscated name (a
    SyntaxError), and the convergence test compared a value with itself
    (``abs(x_na - x_na)``), which is always 0.
    '''
    x_n = x0
    x_n1 = x1
    while True:
        # A degenerate or horizontal secant has no usable x-intercept.
        if x_n == x_n1 or function(x_n ) == function(x_n1 ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        # Slide the window forward for the next iteration.
        x_n = x_n1
        x_n1 = x_n2
def SCREAMING_SNAKE_CASE__( x : float ) -> float:
    '''Example polynomial f(x) = x^3 - 2x - 5 used by the secant-method demo.

    Fix: the original's obfuscated parameter left ``x`` referenced in the
    body as an undefined name (NameError at call time).
    '''
    return math.pow(x , 3 ) - (2 * x) - 5
# Demo entry point: print the secant-method root of f on [3, 3.5].
# NOTE(review): `intersection` and `f` are not defined under these names in
# this module (the defs above are obfuscated) — confirm the intended callables.
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
    """`Image` feature: images stored in Arrow as {bytes: binary, path: string} structs.

    Encoding accepts str paths, raw bytes, dicts, numpy arrays, or PIL images;
    decoding (when enabled) returns a `PIL.Image.Image`.

    NOTE(review): the duplicated ``_lowerCamelCase`` field names are
    obfuscation artifacts; presumably `decode`, `id`, `dtype`, `pa_type`
    and `_type` upstream — confirm before editing.
    """

    _lowerCamelCase : bool = True  # decode flag: whether decode_example may build PIL images
    _lowerCamelCase : Optional[str] = None
    # Automatically constructed
    _lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
    _lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)

    def __call__( self : Union[str, Any] ):
        """Return the pyarrow storage type of this feature."""
        return self.pa_type

    def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        """Encode one example into the {path, bytes} storage dict.

        Raises ImportError when Pillow is unavailable, and ValueError for
        dicts carrying neither a usable 'path' nor 'bytes'.
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(a_, a_ ):
            UpperCamelCase__ = np.array(a_ )
        if isinstance(a_, a_ ):
            # plain string: treat as a file path
            return {"path": value, "bytes": None}
        elif isinstance(a_, a_ ):
            # raw bytes: store directly
            return {"path": None, "bytes": value}
        elif isinstance(a_, np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(a_ )
        elif isinstance(a_, PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(a_ )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )

    def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
        """Decode a {path, bytes} dict into a `PIL.Image.Image`.

        Local paths are opened directly; hub URLs go through `xopen`, using a
        per-repo auth token when one can be resolved from the URL. Raises
        RuntimeError when the feature was built with decode disabled.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            UpperCamelCase__ = {}
        UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
            else:
                if is_local_path(a_ ):
                    UpperCamelCase__ = PIL.Image.open(a_ )
                else:
                    UpperCamelCase__ = path.split("::" )[-1]
                    try:
                        # Resolve the repo id from the hub URL to pick the right auth token.
                        UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
                        UpperCamelCase__ = token_per_repo_id.get(a_ )
                    except ValueError:
                        UpperCamelCase__ = None
                    with xopen(a_, "rb", use_auth_token=a_ ) as f:
                        UpperCamelCase__ = BytesIO(f.read() )
                    UpperCamelCase__ = PIL.Image.open(bytes_ )
        else:
            UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image

    def lowercase_ ( self : List[str] ):
        """Return the flattened feature spec: itself when decodable, else raw {bytes, path} Values."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("binary" ),
                "path": Value("string" ),
            }
        )

    def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        """Cast Arrow storage of various shapes (strings, binaries, structs, lists) to {bytes, path} structs."""
        if pa.types.is_string(storage.type ):
            # paths only -> add a null bytes column
            UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            # raw bytes only -> add a null path column
            UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                UpperCamelCase__ = storage.field("bytes" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                UpperCamelCase__ = storage.field("path" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # nested lists are interpreted as arrays and re-encoded to image bytes
            UpperCamelCase__ = pa.array(
                [encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(a_, self.pa_type )

    def lowercase_ ( self : str, a_ : pa.StructArray ):
        """Embed referenced files: read every path into the bytes column of the storage."""
        @no_op_if_value_is_null
        def path_to_bytes(a_ : Dict ):
            # Read a referenced file fully into memory.
            with xopen(a_, "rb" ) as f:
                UpperCamelCase__ = f.read()
            return bytes_
        UpperCamelCase__ = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        # Keep only the basename: bytes are now embedded, so full paths are stale.
        UpperCamelCase__ = pa.array(
            [os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
        UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
    '''Return the image formats Pillow can both open and save.

    The list is computed once and cached in the module-level
    ``_IMAGE_COMPRESSION_FORMATS``. Raises ImportError without Pillow.

    Fix: the original assigned the computed list to a throwaway local
    instead of the declared global, so the function always returned None.
    '''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # Formats usable for round-tripping: readable AND writable by Pillow.
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( image : "PIL.Image.Image" ) -> bytes:
    '''Serialize *image* to bytes, keeping its original format when Pillow can
    round-trip it, else falling back to PNG (common modes) or TIFF.

    Fix: the original's obfuscated parameter left ``image`` referenced in the
    body as an undefined name.
    '''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        image_format = image.format
    else:
        # PNG covers the common 8-bit modes; TIFF handles everything else.
        image_format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=image_format )
    return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( image : "PIL.Image.Image" ) -> dict:
    '''Encode a PIL image as a {path, bytes} dict, preferring its on-disk path.

    Fix: the original's obfuscated parameter left ``image`` referenced in the
    body as an undefined name.
    '''
    if hasattr(image , "filename" ) and image.filename != "":
        # The image already lives on disk: store its path, skip the bytes.
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def SCREAMING_SNAKE_CASE__( array : np.ndarray ) -> dict:
    '''Encode a numpy array as a {path, bytes} image dict via Pillow.

    Multi-channel arrays must be (or be downcast to) uint8; single-channel
    arrays are downcast within their dtype kind until a Pillow-compatible
    dtype from ``_VALID_IMAGE_ARRAY_DTPYES`` is found.

    Raises:
        ImportError: when Pillow is unavailable.
        TypeError: when no compatible dtype exists.

    Fixes: the original's obfuscated parameter left ``array`` referenced as an
    undefined name; the downcast loop built its dtype string from an undefined
    name; and it overwrote ``dest_dtype`` on every failed attempt, defeating
    the final ``is None`` check.
    '''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            if np.dtype(dest_dtype_str ) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str )
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def SCREAMING_SNAKE_CASE__( objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    '''Encode a homogeneous list of image-like objects to {path, bytes} dicts.

    Dispatch is decided by the first non-null element: str paths map to
    {"path": ..., "bytes": None}; numpy arrays and PIL images are encoded to
    bytes (None-preserving); anything else is returned unchanged.

    Fixes: the original called ``isinstance(x, x)`` (TypeError at runtime:
    the second argument must be a type) and wrapped the wrong callable in
    ``no_op_if_value_is_null``.
    '''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, first_obj = first_non_null_value(objs )
        if isinstance(first_obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(first_obj , np.ndarray ):
            # None-preserving wrapper around the array encoder
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(first_obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class UpperCAmelCase :
        """Stand-in used when vision extras are missing so this module still imports.

        NOTE(review): ``*a_ : Any, **a_ : Any`` duplicates the parameter name,
        which is a SyntaxError in real Python — presumably ``*args, **kwargs``
        upstream; confirm.
        """

        @staticmethod
        def lowercase_ ( *a_ : Any, **a_ : Any ):
            """No-op placeholder standing in for PIL.Image.open."""
            pass
@is_pipeline_test
@require_vision
class UpperCAmelCase ( unittest.TestCase):
    """Pipeline tests for zero-shot image classification.

    Covers the tiny-random CLIP checkpoint on both PT and TF (near-uniform
    scores, so only membership/shape is checked) and slow integration runs
    against the real openai/clip-vit-base-patch32 model.
    """

    @require_torch
    def lowercase_ ( self : Any ):
        """Tiny random CLIP (PyTorch): uniform ~0.333 scores; label order is unstable."""
        UpperCamelCase__ = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        UpperCamelCase__ = image_classifier(a_, candidate_labels=["a", "b", "c"] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(a_ ), [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ], )
        UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
        # Batched call: only score values and label types are stable, not order.
        self.assertEqual(
            nested_simplify(a_ ), [
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
            ], )

    @require_tf
    def lowercase_ ( self : List[str] ):
        """Tiny random CLIP (TensorFlow): same near-uniform score checks as the PT test."""
        UpperCamelCase__ = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
        UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        UpperCamelCase__ = image_classifier(a_, candidate_labels=["a", "b", "c"] )
        self.assertEqual(
            nested_simplify(a_ ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
        UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
        self.assertEqual(
            nested_simplify(a_ ), [
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
                [
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                    {"score": 0.333, "label": ANY(a_ )},
                ],
            ], )

    @slow
    @require_torch
    def lowercase_ ( self : Dict ):
        """Real CLIP (PyTorch): the cats-with-remotes image should score remote/cat high, plane near zero."""
        UpperCamelCase__ = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        UpperCamelCase__ = image_classifier(a_, candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(a_ ), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
        self.assertEqual(
            nested_simplify(a_ ), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )

    @slow
    @require_tf
    def lowercase_ ( self : Union[str, Any] ):
        """Real CLIP (TensorFlow): same expectations as the slow PT test."""
        UpperCamelCase__ = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
        # This is an image of 2 cats with remotes and no planes
        UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        UpperCamelCase__ = image_classifier(a_, candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(a_ ), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
        self.assertEqual(
            nested_simplify(a_ ), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Tokenizer test suite for CLIP, covering the slow and fast tokenizers.

    NOTE(review): the class attribute names below are obfuscated duplicates
    (``_lowerCamelCase``); presumably ``tokenizer_class``,
    ``rust_tokenizer_class``, ``test_rust_tokenizer``,
    ``from_pretrained_kwargs`` and ``test_seq2seq`` upstream — confirm.
    """

    _lowerCamelCase : Union[str, Any] = CLIPTokenizer
    _lowerCamelCase : Dict = CLIPTokenizerFast
    _lowerCamelCase : int = True
    _lowerCamelCase : Tuple = {}
    _lowerCamelCase : Tuple = False
    def lowercase_ ( self : Tuple ):
        """Write a tiny BPE vocab and merges file into the tmp dir for the tests."""
        super().setUp()
        # fmt: off
        UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
        UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        UpperCamelCase__ = {"unk_token": "<unk>"}
        UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
            fp.write(json.dumps(a_ ) + "\n" )
        with open(self.merges_file, "w", encoding="utf-8" ) as fp:
            fp.write("\n".join(a_ ) )
    def lowercase_ ( self : Optional[Any], **a_ : str ):
        """Build a slow CLIPTokenizer from the tmp-dir fixture, merging in the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
    def lowercase_ ( self : str, **a_ : str ):
        """Build a fast CLIPTokenizerFast from the same tmp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
    def lowercase_ ( self : List[Any], a_ : Dict ):
        """Return the (input_text, output_text) pair used by the common tokenizer tests."""
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = "lower newer"
        return input_text, output_text
    def lowercase_ ( self : Optional[Any] ):
        """Tokenize "lower newer" with the fixture vocab and check tokens and ids."""
        UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        UpperCamelCase__ = "lower newer"
        # Expected BPE split given the merges written in setUp.
        UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        UpperCamelCase__ = tokenizer.tokenize(a_ )
        self.assertListEqual(a_, a_ )
        UpperCamelCase__ = tokens + [tokenizer.unk_token]
        UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def lowercase_ ( self : Union[str, Any] ):
        """Re-run the shared python/rust tokenizer equivalence test with ftfy installed."""
        super().test_tokenization_python_rust_equals()
    def lowercase_ ( self : List[str] ):
        """Intentional no-op body.

        NOTE(review): the method name is mangled; presumably this overrides and
        thereby disables one of the inherited common tests for CLIP — confirm
        which one against the upstream test suite.
        """
        pass
| 31
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """Configuration holder used to build ``VivitImageProcessor`` instances in the tests.

    Fixes applied to the mangled original:
    - the class is renamed from ``UpperCAmelCase`` to ``VivitImageProcessingTester``,
      the name the test class below instantiates;
    - the ``__init__`` signature repeated the parameter name ``a_`` (a SyntaxError);
      parameter names are restored from the names read in the body;
    - every value was assigned to a throwaway name instead of a ``self`` attribute.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a ``VivitImageProcessor``."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class UpperCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``VivitImageProcessor``: attribute presence, dict round-trip, and
    batched/unbatched calls over PIL, numpy and torch video inputs.

    Fixes applied to the mangled original:
    - base class ``SCREAMING_SNAKE_CASE__`` (undefined here) replaced with the
      imported ``ImageProcessingSavingTestMixin``;
    - ``image_processing_class``, ``setUp``, the ``image_processor_dict``
      property and the ``test_*`` names are restored from how they are read
      elsewhere in the class and from unittest's discovery convention;
    - locals were assigned to a throwaway name and then read as undefined names.
    """

    # Class under test; ``None`` when the vision extras are unavailable.
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        # Shared configuration/expected-shape holder used by every test below.
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes every expected configuration attribute."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """``from_dict`` honours both the stored dict and keyword overrides."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def _expected_shape(self, batch_size):
        """Expected ``pixel_values`` shape for a batch of processed videos."""
        tester = self.image_processor_tester
        return (
            batch_size,
            tester.num_frames,
            tester.num_channels,
            tester.crop_size["height"],
            tester.crop_size["width"],
        )

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # Create random PIL videos.
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input.
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(1))

        # Test batched.
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(self.image_processor_tester.batch_size))

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # Create random numpy tensors.
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input.
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(1))

        # Test batched.
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(self.image_processor_tester.batch_size))

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # Create random PyTorch tensors.
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input.
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(1))

        # Test batched.
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(self.image_processor_tester.batch_size))
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of ``max_length`` seconds from the input audio.

    Fixes applied: the original signature repeated the parameter name
    ``_UpperCamelCase`` three times (a SyntaxError) while the body read
    ``sample_rate``/``max_length``; the function is renamed to
    ``random_subsample``, the name ``main``'s ``train_transforms`` calls it by;
    the ``-> str`` return annotation was wrong (an audio array is returned).

    Args:
        wav: 1-D waveform array.
        max_length: maximum clip length in seconds.
        sample_rate: samples per second of ``wav``.

    Returns:
        ``wav`` unchanged when it is already short enough, otherwise a random
        contiguous slice of ``round(sample_rate * max_length)`` samples.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data to use for training and evaluation.

    Restored from the mangled original: the class is renamed from
    ``UpperCAmelCase`` (it is referenced as ``DataTrainingArguments`` in
    ``main``); every field was bound to the single name ``_lowerCamelCase``;
    field names come from the ``data_args.<name>`` reads in ``main``; the
    mangled ``default=SCREAMING_SNAKE_CASE__`` defaults are restored to None.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature-extractor to fine-tune.

    Restored from the mangled original: renamed from ``UpperCAmelCase`` (it is
    referenced as ``ModelArguments`` in ``main``); field names come from the
    ``model_args.<name>`` reads in ``main``; ``lowercase_`` is renamed back to
    ``__post_init__`` so the deprecation shim actually runs, and the undefined
    name ``a_`` passed to ``warnings.warn`` is replaced by ``FutureWarning``.
    NOTE(review): the boolean defaults are restored from the upstream
    audio-classification example — confirm against it.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # Deprecation shim mapping the old --freeze_feature_extractor flag onto
        # --freeze_feature_encoder.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def SCREAMING_SNAKE_CASE__() -> None:
    """Fine-tune a pretrained audio model for audio classification.

    Parses model/data/training arguments, loads and resamples the audio
    dataset, builds the feature extractor and model, then trains and/or
    evaluates with the HF ``Trainer``.

    Restored from the mangled original: every local was assigned to a
    throwaway name and read back as the undefined ``_UpperCamelCase``;
    ``training_args.fpaa`` is fixed to ``fp16`` and the ``labelaid`` /
    ``idalabel`` keywords to ``label2id`` / ``id2label``.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample each training clip, then featurize the batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize full clips for evaluation (no subsampling)."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    # Bug fix: `main` is not defined in this module — the entry point was
    # mangled to `SCREAMING_SNAKE_CASE__`, so call the function that exists.
    SCREAMING_SNAKE_CASE__()
| 31
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h: int, w: int, scale_factor: int = 8) -> "tuple[int, int]":
    """Compute latent height/width for an image of size (h, w).

    Each dimension is divided by ``scale_factor**2`` rounding UP, then
    multiplied back by ``scale_factor`` — i.e. the result is ``dim /
    scale_factor`` rounded up to the next multiple of ``scale_factor``.

    Renamed from the mangled ``SCREAMING_SNAKE_CASE__``: the pipeline's
    ``__call__`` invokes it as ``get_new_h_w``; the ``-> Union[str, Any]``
    return annotation was wrong (a pair of ints is returned).
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : List[Any], a_ : MultilingualCLIP, a_ : XLMRobertaTokenizer, a_ : UNetaDConditionModel, a_ : Union[DDIMScheduler, DDPMScheduler], a_ : VQModel, ):
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=a_, tokenizer=a_, unet=a_, scheduler=a_, movq=a_, )
UpperCamelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowercase_ ( self : Tuple, a_ : Optional[int], a_ : Union[str, Any], a_ : Optional[Any], a_ : List[Any], a_ : Any, a_ : str ):
"""simple docstring"""
if latents is None:
UpperCamelCase__ = randn_tensor(a_, generator=a_, device=a_, dtype=a_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
UpperCamelCase__ = latents.to(a_ )
UpperCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def lowercase_ ( self : int, a_ : Optional[Any], a_ : Union[str, Any], a_ : List[Any], a_ : List[Any], a_ : Tuple=None, ):
"""simple docstring"""
UpperCamelCase__ = len(a_ ) if isinstance(a_, a_ ) else 1
# get prompt text embeddings
UpperCamelCase__ = self.tokenizer(
a_, padding="max_length", truncation=a_, max_length=77, return_attention_mask=a_, add_special_tokens=a_, return_tensors="pt", )
UpperCamelCase__ = text_inputs.input_ids
UpperCamelCase__ = self.tokenizer(a_, padding="longest", return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a_, a_ ):
UpperCamelCase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
UpperCamelCase__ = text_input_ids.to(a_ )
UpperCamelCase__ = text_inputs.attention_mask.to(a_ )
UpperCamelCase__ , UpperCamelCase__ = self.text_encoder(
input_ids=a_, attention_mask=a_ )
UpperCamelCase__ = prompt_embeds.repeat_interleave(a_, dim=0 )
UpperCamelCase__ = text_encoder_hidden_states.repeat_interleave(a_, dim=0 )
UpperCamelCase__ = text_mask.repeat_interleave(a_, dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ = 42
if negative_prompt is None:
UpperCamelCase__ = [""] * batch_size
elif type(a_ ) is not type(a_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !='
f' {type(a_ )}.' )
elif isinstance(a_, a_ ):
UpperCamelCase__ = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
UpperCamelCase__ = negative_prompt
UpperCamelCase__ = self.tokenizer(
a_, padding="max_length", max_length=77, truncation=a_, return_attention_mask=a_, add_special_tokens=a_, return_tensors="pt", )
UpperCamelCase__ = uncond_input.input_ids.to(a_ )
UpperCamelCase__ = uncond_input.attention_mask.to(a_ )
UpperCamelCase__ , UpperCamelCase__ = self.text_encoder(
input_ids=a_, attention_mask=a_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ = negative_prompt_embeds.shape[1]
UpperCamelCase__ = negative_prompt_embeds.repeat(1, a_ )
UpperCamelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt, a_ )
UpperCamelCase__ = uncond_text_encoder_hidden_states.shape[1]
UpperCamelCase__ = uncond_text_encoder_hidden_states.repeat(1, a_, 1 )
UpperCamelCase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt, a_, -1 )
UpperCamelCase__ = uncond_text_mask.repeat_interleave(a_, dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCamelCase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCamelCase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowercase_ ( self : Tuple, a_ : Union[str, Any]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase__ = torch.device(f'cuda:{gpu_id}' )
UpperCamelCase__ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a_, a_ )
def lowercase_ ( self : List[Any], a_ : Tuple=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCamelCase__ = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=a_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase__ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCamelCase__ , UpperCamelCase__ = cpu_offload_with_hook(a_, a_, prev_module_hook=a_ )
if self.safety_checker is not None:
UpperCamelCase__ , UpperCamelCase__ = cpu_offload_with_hook(self.safety_checker, a_, prev_module_hook=a_ )
# We'll offload the last model manually.
UpperCamelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase_ ( self : Tuple ):
"""simple docstring"""
if not hasattr(self.unet, "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a_, "_hf_hook" )
and hasattr(module._hf_hook, "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    # NOTE(review): `a_` is not defined at class-body scope — presumably the
    # module-level example docstring constant was intended; confirm upstream.
    @replace_example_docstring(a_ )
    # NOTE(review): the signature repeats the parameter name `a_` (a duplicate-
    # argument SyntaxError) and every assignment target below was mangled to
    # `UpperCamelCase__` while reads keep the original names (`batch_size`,
    # `prompt_embeds`, `latents`, ...).  Code is left byte-identical; only
    # documentation was added.
    def __call__( self : str, a_ : Union[str, List[str]], a_ : Union[torch.FloatTensor, List[torch.FloatTensor]], a_ : Union[torch.FloatTensor, List[torch.FloatTensor]], a_ : Optional[Union[str, List[str]]] = None, a_ : int = 512, a_ : int = 512, a_ : int = 100, a_ : float = 4.0, a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : Optional[torch.FloatTensor] = None, a_ : Optional[str] = "pil", a_ : bool = True, ):
        """Run text-conditioned denoising with CLIP image embeds and decode the
        final latents with the MoVQ decoder, returning PIL/np/pt images."""
        # prompt may be a single string or a batch of strings
        if isinstance(a_, a_ ):
            UpperCamelCase__ = 1
        elif isinstance(a_, a_ ):
            UpperCamelCase__ = len(a_ )
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(a_ )}' )
        UpperCamelCase__ = self._execution_device
        UpperCamelCase__ = batch_size * num_images_per_prompt
        # classifier-free guidance is active for guidance_scale > 1
        UpperCamelCase__ = guidance_scale > 1.0
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._encode_prompt(
            a_, a_, a_, a_, a_ )
        # lists of image embeds are concatenated into a single batch tensor
        if isinstance(a_, a_ ):
            UpperCamelCase__ = torch.cat(a_, dim=0 )
        if isinstance(a_, a_ ):
            UpperCamelCase__ = torch.cat(a_, dim=0 )
        if do_classifier_free_guidance:
            # duplicate embeds per requested image and stack [negative, positive]
            UpperCamelCase__ = image_embeds.repeat_interleave(a_, dim=0 )
            UpperCamelCase__ = negative_image_embeds.repeat_interleave(a_, dim=0 )
            UpperCamelCase__ = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(
                dtype=prompt_embeds.dtype, device=a_ )
        self.scheduler.set_timesteps(a_, device=a_ )
        UpperCamelCase__ = self.scheduler.timesteps
        UpperCamelCase__ = self.unet.config.in_channels
        UpperCamelCase__ , UpperCamelCase__ = get_new_h_w(a_, a_, self.movq_scale_factor )
        # create initial latent
        UpperCamelCase__ = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, a_, a_, a_, self.scheduler, )
        for i, t in enumerate(self.progress_bar(a_ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCamelCase__ = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            UpperCamelCase__ = self.unet(
                sample=a_, timestep=a_, encoder_hidden_states=a_, added_cond_kwargs=a_, return_dict=a_, )[0]
            if do_classifier_free_guidance:
                # the UNet predicts noise and (learned) variance concatenated on dim 1;
                # guidance is applied to the noise half only
                UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1], dim=1 )
                UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 )
                UpperCamelCase__ , UpperCamelCase__ = variance_pred.chunk(2 )
                UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCamelCase__ = torch.cat([noise_pred, variance_pred_text], dim=1 )
            if not (
                hasattr(self.scheduler.config, "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # scheduler does not consume the variance — drop it
                UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1], dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase__ = self.scheduler.step(
                a_, a_, a_, generator=a_, ).prev_sample
        # post-processing
        UpperCamelCase__ = self.movq.decode(a_, force_not_quantize=a_ )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            # rescale [-1, 1] -> [0, 1] and move to HWC numpy
            UpperCamelCase__ = image * 0.5 + 0.5
            UpperCamelCase__ = image.clamp(0, 1 )
            UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
        if output_type == "pil":
            UpperCamelCase__ = self.numpy_to_pil(a_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a_ )
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
    """Read a file and return its contents as a string of '0'/'1' characters.

    Each byte is rendered as its zero-padded 8-bit binary representation.
    Exits the process with a message when the file cannot be opened.
    (fixed: the original bound every value to a mangled name while the reads
    used `result`, `data` and `curr_byte`, which were never defined)
    """
    result = ""
    try:
        with open(_UpperCamelCase , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
    """Decompress an LZW-encoded bit string back into the original bit string.

    The lexicon starts with the two single-bit codes and gains one entry per
    decoded code; whenever its size reaches a power of two, every existing key
    is widened with a leading '0' so all codes keep equal length.
    (fixed: the original called the nonexistent `math.loga` instead of
    `math.log2` and left `lexicon`/`index`/`result` assignments mangled)
    """
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon )
    for i in range(len(_UpperCamelCase ) ):
        curr_string += _UpperCamelCase[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        # the matched code is recycled as the new "...0" entry
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            # lexicon size hit a power of two: widen every code by one bit
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        # register the next code ("...1") under the binary form of `index`
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , to_write : str ) -> None:
    """Pack the bit string `to_write` into bytes and write them to the path
    `_UpperCamelCase`.

    The payload is split into 8-bit groups; a '1' followed by zero padding
    marks the end of the data.  Exits the process on I/O failure.
    (fixed: the original declared both parameters with the same name — a
    duplicate-argument SyntaxError — and left `byte_length`,
    `result_byte_array` and `opened_file` unbound; names were restored from
    their read sites)
    """
    byte_length = 8
    try:
        with open(_UpperCamelCase , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            # NOTE(review): the final element is intentionally skipped here,
            # preserved from the original — confirm against the compressor.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
    """Strip the leading zero padding (and the '1' marker) from a bit string.

    (fixed: the original body read `data_bits`, which was never bound — it is
    the function's parameter)
    """
    data_bits = _UpperCamelCase
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    # NOTE(review): slicing by `counter + 1` again on the already-sliced string
    # is preserved from the original; verify against the matching compressor.
    data_bits = data_bits[counter + 1 :]
    return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , destination_path : str ) -> None:
    """Read a compressed source file, decompress it, and write the result.

    Args:
        _UpperCamelCase: path of the compressed input file.
        destination_path: path to write the decompressed output to.
    (fixed: the original declared both parameters with the same name — a
    duplicate-argument SyntaxError.  The helper names below are unresolved in
    this mangled module: they refer to the sibling functions defined above,
    which were all renamed to SCREAMING_SNAKE_CASE__ — confirm targets.)
    """
    data_bits = read_file_binary(_UpperCamelCase )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
# CLI entry point: `python <script> <compressed_input_path> <output_path>`.
# NOTE(review): `compress` is unresolved here — every function above was
# renamed to SCREAMING_SNAKE_CASE__ by the mangling pass; confirm the intended
# target before running.
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCAmelCase ( unittest.TestCase):
    """Pipeline tests for `text-generation` on both PyTorch and TensorFlow.

    NOTE(review): this file was mechanically mangled — keyword values such as
    ``do_sample=a_`` reference a name that is not defined in these methods
    (booleans/flags were presumably intended), and assignment targets were
    renamed to ``UpperCamelCase__`` while later reads keep the original names
    (``text_generator``, ``outputs`` ...).  Code is left byte-identical; only
    documentation was added.
    """
    # mappings used by the common pipeline test mixin to pick candidate models
    _lowerCamelCase : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
    _lowerCamelCase : Any = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def lowercase_ ( self : Tuple ):
        """Deterministic tiny-model generation checks on the PyTorch backend:
        single prompt, batched prompts, tensor returns, and padded batching."""
        UpperCamelCase__ = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt" )
        # Using `do_sample=False` to force deterministic output
        UpperCamelCase__ = text_generator("This is a test", do_sample=a_ )
        self.assertEqual(
            a_, [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ], )
        UpperCamelCase__ = text_generator(["This is a test", "This is a second test"] )
        self.assertEqual(
            a_, [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ], )
        # tensor output: token ids instead of decoded text
        UpperCamelCase__ = text_generator("This is a test", do_sample=a_, num_return_sequences=2, return_tensors=a_ )
        self.assertEqual(
            a_, [
                {"generated_token_ids": ANY(a_ )},
                {"generated_token_ids": ANY(a_ )},
            ], )
        # batched generation needs a pad token; reuse EOS as "<pad>"
        UpperCamelCase__ = text_generator.model.config.eos_token_id
        UpperCamelCase__ = "<pad>"
        UpperCamelCase__ = text_generator(
            ["This is a test", "This is a second test"], do_sample=a_, num_return_sequences=2, batch_size=2, return_tensors=a_, )
        self.assertEqual(
            a_, [
                [
                    {"generated_token_ids": ANY(a_ )},
                    {"generated_token_ids": ANY(a_ )},
                ],
                [
                    {"generated_token_ids": ANY(a_ )},
                    {"generated_token_ids": ANY(a_ )},
                ],
            ], )
    @require_tf
    def lowercase_ ( self : Dict ):
        """Deterministic tiny-model generation checks on the TensorFlow backend."""
        UpperCamelCase__ = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf" )
        # Using `do_sample=False` to force deterministic output
        UpperCamelCase__ = text_generator("This is a test", do_sample=a_ )
        self.assertEqual(
            a_, [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ], )
        UpperCamelCase__ = text_generator(["This is a test", "This is a second test"], do_sample=a_ )
        self.assertEqual(
            a_, [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ], )
    def lowercase_ ( self : Union[str, Any], a_ : Optional[Any], a_ : Any, a_ : List[str] ):
        """Factory used by the common test mixin: build a pipeline plus the
        example prompts it should be exercised with."""
        UpperCamelCase__ = TextGenerationPipeline(model=a_, tokenizer=a_ )
        return text_generator, ["This is a test", "Another test"]
    def lowercase_ ( self : List[Any] ):
        """`stop_sequence` should truncate generation at the given string."""
        UpperCamelCase__ = "Hello I believe in"
        UpperCamelCase__ = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2" )
        UpperCamelCase__ = text_generator(a_ )
        self.assertEqual(
            a_, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], )
        UpperCamelCase__ = text_generator(a_, stop_sequence=" fe" )
        self.assertEqual(a_, [{"generated_text": "Hello I believe in fe"}] )
    def lowercase_ ( self : int, a_ : Dict, a_ : Optional[Any] ):
        """Shared behavioural checks run against every candidate model:
        return_full_text / return_text / return_tensors combinations, empty
        prompts, and long-input handling strategies."""
        UpperCamelCase__ = text_generator.model
        UpperCamelCase__ = text_generator.tokenizer
        UpperCamelCase__ = text_generator("This is a test" )
        self.assertEqual(a_, [{"generated_text": ANY(a_ )}] )
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
        UpperCamelCase__ = text_generator("This is a test", return_full_text=a_ )
        self.assertEqual(a_, [{"generated_text": ANY(a_ )}] )
        self.assertNotIn("This is a test", outputs[0]["generated_text"] )
        # pipeline-level default for return_full_text
        UpperCamelCase__ = pipeline(task="text-generation", model=a_, tokenizer=a_, return_full_text=a_ )
        UpperCamelCase__ = text_generator("This is a test" )
        self.assertEqual(a_, [{"generated_text": ANY(a_ )}] )
        self.assertNotIn("This is a test", outputs[0]["generated_text"] )
        UpperCamelCase__ = text_generator("This is a test", return_full_text=a_ )
        self.assertEqual(a_, [{"generated_text": ANY(a_ )}] )
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
        UpperCamelCase__ = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=a_ )
        self.assertEqual(
            a_, [
                [{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
                [{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
            ], )
        if text_generator.tokenizer.pad_token is not None:
            UpperCamelCase__ = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=a_ )
            self.assertEqual(
                a_, [
                    [{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
                    [{"generated_text": ANY(a_ )}, {"generated_text": ANY(a_ )}],
                ], )
        # mutually exclusive output options must raise
        with self.assertRaises(a_ ):
            UpperCamelCase__ = text_generator("test", return_full_text=a_, return_text=a_ )
        with self.assertRaises(a_ ):
            UpperCamelCase__ = text_generator("test", return_full_text=a_, return_tensors=a_ )
        with self.assertRaises(a_ ):
            UpperCamelCase__ = text_generator("test", return_text=a_, return_tensors=a_ )
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            UpperCamelCase__ = text_generator("" )
            self.assertEqual(a_, [{"generated_text": ANY(a_ )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                UpperCamelCase__ = text_generator("" )
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        UpperCamelCase__ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 1_0000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator("This is a test" * 500, max_new_tokens=20 )
            UpperCamelCase__ = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(a_ ):
                text_generator(
                    "This is a test" * 500, handle_long_generation="hole", max_new_tokens=tokenizer.model_max_length + 10, )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def lowercase_ ( self : Optional[Any] ):
        """`device_map`/`torch_dtype` accepted both via model_kwargs and as
        first-class pipeline arguments; dtype defaults to float32."""
        import torch
        # Classic `model_kwargs`
        UpperCamelCase__ = pipeline(
            model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa}, )
        self.assertEqual(pipe.model.device, torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloataa )
        UpperCamelCase__ = pipe("This is a test" )
        self.assertEqual(
            a_, [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ], )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        UpperCamelCase__ = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device, torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloataa )
        UpperCamelCase__ = pipe("This is a test" )
        self.assertEqual(
            a_, [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ], )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        UpperCamelCase__ = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto" )
        self.assertEqual(pipe.model.device, torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.floataa )
        UpperCamelCase__ = pipe("This is a test" )
        self.assertEqual(
            a_, [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ], )
    @require_torch
    @require_torch_gpu
    def lowercase_ ( self : Tuple ):
        """Smoke test: explicit device id with fp16 weights."""
        import torch
        UpperCamelCase__ = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.floataa )
        pipe("This is a test" )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def lowercase_ ( self : Any ):
        """Smoke test: accelerate device_map="auto" with fp16 and sampling args."""
        import torch
        UpperCamelCase__ = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.floataa )
        pipe("This is a test", do_sample=a_, top_p=0.5 )
    def lowercase_ ( self : List[Any] ):
        """A warning is logged only when both `max_length` and `max_new_tokens`
        are supplied."""
        UpperCamelCase__ = "Hello world"
        UpperCamelCase__ = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2" )
        if text_generator.model.framework == "tf":
            UpperCamelCase__ = logging.get_logger("transformers.generation.tf_utils" )
        else:
            UpperCamelCase__ = logging.get_logger("transformers.generation.utils" )
        UpperCamelCase__ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(a_ ) as cl:
            UpperCamelCase__ = text_generator(a_, max_length=10, max_new_tokens=1 )
        self.assertIn(a_, cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(a_ ) as cl:
            UpperCamelCase__ = text_generator(a_, max_new_tokens=1 )
        self.assertNotIn(a_, cl.out )
        with CaptureLogger(a_ ) as cl:
            UpperCamelCase__ = text_generator(a_, max_length=10 )
        self.assertNotIn(a_, cl.out )
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Unconditional latent-diffusion pipeline: VQ-VAE decoder + UNet + DDIM.

    NOTE(review): the base-class name `SCREAMING_SNAKE_CASE__` is unresolved in
    this mangled module — `DiffusionPipeline` (imported above) was presumably
    intended.  The `__init__`/`__call__` signatures also repeat the parameter
    name `a_` (a duplicate-argument SyntaxError), and assignment targets in
    `__call__` were mangled while reads keep original names (`latents`,
    `batch_size`, `eta`, ...).  Code is left byte-identical; only docs added.
    """
    def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
        """Register the VQ-VAE, UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
    @torch.no_grad()
    def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
        """Sample random latents, denoise them with the UNet/DDIM loop, and
        decode the result with the VQ-VAE into images."""
        # initial Gaussian latents at the UNet's native sample size
        UpperCamelCase__ = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
        UpperCamelCase__ = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(a_ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase__ = {}
        if accepts_eta:
            UpperCamelCase__ = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
            # predict the noise residual
            UpperCamelCase__ = self.unet(a_, a_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
        # decode the image latents with the VAE
        UpperCamelCase__ = self.vqvae.decode(a_ ).sample
        UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
        UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase__ = self.numpy_to_pil(a_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase ( unittest.TestCase):
    """Tests for the `transformers` logging utilities (verbosity levels, env
    overrides, advisory warnings).

    NOTE(review): mangled file — `a_` passed to helpers such as
    `logging.set_verbosity(a_)` / `CaptureLogger(a_)` is not defined inside
    these methods (the saved level / logger objects were presumably intended).
    Code is left byte-identical; only documentation was added.
    """
    def lowercase_ ( self : List[str] ):
        """set_verbosity_* must be reflected by the root library logger's
        effective level, and the original level must be restorable."""
        UpperCamelCase__ = logging.get_logger()
        # the current default level is logging.WARNING
        UpperCamelCase__ = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(a_ )
    def lowercase_ ( self : Union[str, Any] ):
        """Warnings from a sub-module logger are emitted or suppressed
        according to the library-wide verbosity."""
        UpperCamelCase__ = logging.get_verbosity()
        UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        UpperCamelCase__ = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(a_ ) as cl:
                logger.warning(a_ )
            self.assertEqual(cl.out, msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(a_ ) as cl:
            logger.warning(a_ )
        self.assertEqual(cl.out, "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(a_ ) as cl:
            logger.warning(a_ )
        self.assertEqual(cl.out, msg + "\n" )
        # restore to the original level
        logging.set_verbosity(a_ )
    @mockenv(TRANSFORMERS_VERBOSITY="error" )
    def lowercase_ ( self : str ):
        """TRANSFORMERS_VERBOSITY env var overrides the internal verbosity."""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        UpperCamelCase__ = os.getenv("TRANSFORMERS_VERBOSITY", a_ )
        UpperCamelCase__ = logging.log_levels[env_level_str]
        UpperCamelCase__ = logging.get_verbosity()
        self.assertEqual(
            a_, a_, f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}', )
        # restore to the original level
        UpperCamelCase__ = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def lowercase_ ( self : Optional[int] ):
        """An unknown TRANSFORMERS_VERBOSITY value must be reported, not crash."""
        transformers.utils.logging._reset_library_root_logger()
        UpperCamelCase__ = logging.logging.getLogger()
        with CaptureLogger(a_ ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out )
        # no need to restore as nothing was changed
    def lowercase_ ( self : Optional[int] ):
        """TRANSFORMERS_NO_ADVISORY_WARNINGS toggles `warning_advice` output."""
        transformers.utils.logging._reset_library_root_logger()
        UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        UpperCamelCase__ = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(a_ ) as cl:
                logger.warning_advice(a_ )
            self.assertEqual(cl.out, "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(a_ ) as cl:
                logger.warning_advice(a_ )
            self.assertEqual(cl.out, msg + "\n" )
def SCREAMING_SNAKE_CASE__( ) -> Union[str, Any]:
    """Toggle huggingface_hub progress bars off and back on, asserting the
    reported state after each switch."""
    for toggle, expect_disabled in ((disable_progress_bar, True), (enable_progress_bar, False)):
        toggle()
        assert bool(are_progress_bars_disabled() ) is expect_disabled
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( target_runners , token ):
    """Query the GitHub Actions runners of `huggingface/transformers` and
    record which of `target_runners` are offline.

    Args:
        target_runners: iterable of runner names to check.
        token: GitHub token with `actions:read` permission.
    Writes the offline runners to ``offline_runners.txt`` (for the Slack
    report) and raises ValueError when any are offline so CI fails loudly.
    (fixed: the original declared both parameters with one name — a
    duplicate-argument SyntaxError — and mangled every assignment target;
    names were restored from their read sites)
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    # shell=True: the command above is a single shell string, not an argv list
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
# CLI entry: check runner status given a comma-separated runner list + token.
# NOTE(review): mangled block — the nested parser is named
# `SCREAMING_SNAKE_CASE__` but referenced as `list_str`, and the
# ArgumentParser/parsed args were assigned to `__lowercase` while reads use
# `parser`/`args`; code left byte-identical.
if __name__ == "__main__":
    def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
        """Split a comma-separated argument string into a list."""
        return values.split("," )
    __lowercase: str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    __lowercase: str = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
import os
import sys
# Make the in-repo `src` layout importable before importing `transformers`.
# (fixed: the path was assigned to a throwaway name while the next line read
# the unbound `SRC_DIR`)
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Minimal runtime dependencies of the bundled `transformers` checkout.
# NOTE(review): the constant's original name was lost in the mangling pass
# (presumably `DEPENDENCIES`); confirm before relying on it by name.
__lowercase = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for ``AutoConfig.from_pretrained`` (the full
    docstring is attached from AutoConfig by the decorator).
    (fixed: `*_UpperCamelCase, **_UpperCamelCase` was a duplicate-argument
    SyntaxError)"""
    return AutoConfig.from_pretrained(*model_args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for ``AutoTokenizer.from_pretrained``.
    (fixed: duplicate-argument SyntaxError in the varargs names)"""
    return AutoTokenizer.from_pretrained(*model_args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for ``AutoModel.from_pretrained``.
    (fixed: duplicate-argument SyntaxError in the varargs names)"""
    return AutoModel.from_pretrained(*model_args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for ``AutoModelForCausalLM.from_pretrained``.
    (fixed: duplicate-argument SyntaxError in the varargs names)"""
    return AutoModelForCausalLM.from_pretrained(*model_args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for ``AutoModelForMaskedLM.from_pretrained``.
    (fixed: duplicate-argument SyntaxError in the varargs names)"""
    return AutoModelForMaskedLM.from_pretrained(*model_args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for
    ``AutoModelForSequenceClassification.from_pretrained``.
    (fixed: duplicate-argument SyntaxError in the varargs names)"""
    return AutoModelForSequenceClassification.from_pretrained(*model_args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__( *model_args , **kwargs ):
    """Pass-through wrapper for
    ``AutoModelForQuestionAnswering.from_pretrained``.
    (fixed: duplicate-argument SyntaxError in the varargs names)"""
    return AutoModelForQuestionAnswering.from_pretrained(*model_args , **kwargs )
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# Module logger, and the generic type shared by map-style / iterable datasets.
# (fixed: both values were assigned to the same throwaway name `__lowercase`,
# leaving `DatasetType` — read in the annotations below — unbound)
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
# NOTE(review): mangled function (datasets.interleave_datasets) — the signature
# repeats `_UpperCamelCase` (a duplicate-argument SyntaxError) and assignment
# targets were renamed while reads keep the original names (`datasets`,
# `dataset_type`, `other_type`, `stopping_strategy`).  Code left byte-identical.
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one,
    optionally sampling with probabilities/seed, stopping per the strategy."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    # validate every element and reject dataset dictionaries with a helpful message
    for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
            if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
        if i == 0:
            # remember the concrete type of the first dataset; all others must match
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    # dispatch to the map-style or iterable implementation
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
    else:
        return _interleave_iterable_datasets(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
# NOTE(review): mangled function (datasets.concatenate_datasets) — repeated
# `_UpperCamelCase` parameters are a duplicate-argument SyntaxError and `dsets`
# is read but never bound.  Code left byte-identical; only docs added.
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) along the
    given axis into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    # validate every element and reject dataset dictionaries with a helpful message
    for i, dataset in enumerate(_UpperCamelCase ):
        if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
            if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
        if i == 0:
            # remember the concrete type of the first dataset; all others must match
            UpperCamelCase__ , UpperCamelCase__ = (
                (Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
            )
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    # dispatch to the map-style or iterable implementation
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
    else:
        return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
def __init__( self : Dict, a_ : list[list[int]] ):
"""simple docstring"""
UpperCamelCase__ = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(a_ ) != 0:
UpperCamelCase__ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(a_ ) != cols:
raise error
for value in row:
if not isinstance(a_, (int, float) ):
raise error
UpperCamelCase__ = rows
else:
UpperCamelCase__ = []
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def lowercase_ ( self : Any ):
        """Number of rows in the matrix."""
        return len(self.rows )
    @property
    def lowercase_ ( self : List[str] ):
        """Number of columns (length of the first row; raises IndexError on an
        empty matrix)."""
        return len(self.rows[0] )
    @property
    def lowercase_ ( self : List[str] ):
        """(rows, columns) shape tuple.

        NOTE(review): `num_rows`/`num_columns` are unresolved — the sibling
        properties were all renamed to `lowercase_` by the mangling pass.
        """
        return (self.num_rows, self.num_columns)
    @property
    def lowercase_ ( self : str ):
        """True when the matrix has as many rows as columns."""
        return self.order[0] == self.order[1]
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return bool(self.determinant() )
def lowercase_ ( self : Any, a_ : int, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(a_ ).determinant()
def lowercase_ ( self : int, a_ : int, a_ : int ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(a_, a_ )
return -1 * self.get_minor(a_, a_ )
def lowercase_ ( self : Dict ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(a_, a_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[str] ):
"""simple docstring"""
return str(self.rows )
def __str__( self : int ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(a_ ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
def lowercase_ ( self : int, a_ : list[int], a_ : int | None = None ):
"""simple docstring"""
UpperCamelCase__ = TypeError("Row must be a list containing all ints and/or floats" )
if not isinstance(a_, a_ ):
raise type_error
for value in row:
if not isinstance(a_, (int, float) ):
raise type_error
if len(a_ ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
self.rows.append(a_ )
else:
UpperCamelCase__ = self.rows[0:position] + [row] + self.rows[position:]
def lowercase_ ( self : str, a_ : list[int], a_ : int | None = None ):
"""simple docstring"""
UpperCamelCase__ = TypeError(
"Column must be a list containing all ints and/or floats" )
if not isinstance(a_, a_ ):
raise type_error
for value in column:
if not isinstance(a_, (int, float) ):
raise type_error
if len(a_ ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
UpperCamelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCamelCase__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : int, a_ : object ):
"""simple docstring"""
if not isinstance(a_, a_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : int, a_ : object ):
"""simple docstring"""
return not self == other
def __neg__( self : int ):
"""simple docstring"""
return self * -1
def __add__( self : Optional[Any], a_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any, a_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Tuple, a_ : Matrix | int | float ):
"""simple docstring"""
if isinstance(a_, (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(a_, a_ ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(a_, a_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__( self : int, a_ : int ):
"""simple docstring"""
if not isinstance(a_, a_ ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
UpperCamelCase__ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase_ ( cls : str, a_ : list[int], a_ : list[int] ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(a_ ) ) )
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
    """Return the minimum total cost of merging all files in ``_UpperCamelCase``.

    Implements the optimal merge pattern: repeatedly merge the two smallest
    files; each merge costs the sum of the two sizes.  The input list is
    consumed/mutated in the process (the merged sizes replace the originals).

    Bug fixed: an obfuscation pass renamed every assignment target to
    ``UpperCamelCase__`` while leaving reads on the original names
    (``files``, ``temp``, ``min_index``, ``optimal_merge_cost``), so the
    function raised NameError on any input with more than one element.

    >>> SCREAMING_SNAKE_CASE__([2, 3, 4])
    14
    >>> SCREAMING_SNAKE_CASE__([5])
    0
    """
    files = _UpperCamelCase  # readable alias; mutation of the caller's list is intentional
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Pop the two smallest files and merge them.
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 31
| 0
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
    """T5-block token encoder: embeds tokens + positions, runs T5 layers, layer-norms.

    NOTE(review): base-class names and all assignment targets were mangled by an
    obfuscation pass (``UpperCamelCase__`` assignments vs. reads such as
    ``self.token_embedder``, ``self.position_encoding``, ``self.dropout_pre``,
    ``self.encoders``, ``self.layer_norm``, ``self.dropout_post``) — the reads
    reveal the intended attribute names; the class is not runnable as written.
    """

    @register_to_config
    def __init__( self : List[str], a_ : int, a_ : int, a_ : int, a_ : float, a_ : int, a_ : int, a_ : int, a_ : int, a_ : str, a_ : bool = False, ):
        """Build token/position embeddings, a stack of T5 blocks, final norm + dropout.

        NOTE(review): the obfuscator collapsed all parameter names to ``a_``
        (a SyntaxError); the TaConfig keyword arguments below reveal the
        original parameter names (vocab_size, d_model, num_heads, d_kv, d_ff,
        dropout_rate, feed_forward_proj, ...).
        """
        super().__init__()
        UpperCamelCase__ = nn.Embedding(a_, a_ )
        UpperCamelCase__ = nn.Embedding(a_, a_ )
        # position embeddings are frozen (requires_grad presumably set False) — TODO confirm
        UpperCamelCase__ = False
        UpperCamelCase__ = nn.Dropout(p=a_ )
        UpperCamelCase__ = TaConfig(
            vocab_size=a_, d_model=a_, num_heads=a_, d_kv=a_, d_ff=a_, dropout_rate=a_, feed_forward_proj=a_, is_decoder=a_, is_encoder_decoder=a_, )
        UpperCamelCase__ = nn.ModuleList()
        for lyr_num in range(a_ ):
            UpperCamelCase__ = TaBlock(a_ )
            self.encoders.append(a_ )
        UpperCamelCase__ = TaLayerNorm(a_ )
        UpperCamelCase__ = nn.Dropout(p=a_ )

    def lowercase_ ( self : str, a_ : Any, a_ : List[Any] ):
        """Forward pass: embed, add position encodings, run the T5 blocks.

        Returns (encoded hidden states after norm + dropout, input mask).
        """
        UpperCamelCase__ = self.token_embedder(a_ )
        UpperCamelCase__ = encoder_input_tokens.shape[1]
        UpperCamelCase__ = torch.arange(a_, device=encoder_input_tokens.device )
        x += self.position_encoding(a_ )
        UpperCamelCase__ = self.dropout_pre(a_ )
        # inverted the attention mask
        UpperCamelCase__ = encoder_input_tokens.size()
        UpperCamelCase__ = self.get_extended_attention_mask(a_, a_ )
        for lyr in self.encoders:
            UpperCamelCase__ = lyr(a_, a_ )[0]
        UpperCamelCase__ = self.layer_norm(a_ )
        return self.dropout_post(a_ ), encoder_inputs_mask
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
    """Print every permutation of ``_UpperCamelCase`` via recursive backtracking.

    Kicks off the state-space search with an empty current sequence, index 0,
    and an all-unused flag list.
    """
    unused_flags = [0] * len(_UpperCamelCase)  # one "used" flag per element, all initially clear
    create_state_space_tree(_UpperCamelCase, [], 0, unused_flags)
def SCREAMING_SNAKE_CASE__( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    """Backtracking step: print each complete permutation of ``sequence``.

    Bug fixed: the obfuscation pass gave all four parameters the same name
    (``_UpperCamelCase`` — a SyntaxError) while the body read the original
    names; the distinct names are restored from those reads. Positional
    call compatibility is preserved.

    Parameters
    ----------
    sequence : the elements to permute (unchanged).
    current_sequence : the prefix built so far (mutated and restored).
    index : how many elements have been placed.
    index_used : per-element "already placed" flags (mutated and restored).
    """
    # A full-length prefix is a complete permutation — emit it.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            SCREAMING_SNAKE_CASE__(sequence, current_sequence, index + 1, index_used)
            # Undo the choice so the next branch starts from a clean state.
            current_sequence.pop()
            index_used[i] = False


# The sibling entry point refers to this function by its pre-obfuscation name.
create_state_space_tree = SCREAMING_SNAKE_CASE__
# NOTE(review): obfuscation broke this demo driver — both lists were renamed to
# the same ``__lowercase`` (the second shadows the first) and the calls still
# reference the pre-obfuscation names ``generate_all_permutations``,
# ``sequence`` and ``sequence_a``, none of which are bound. Restore the
# original names before running.
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density : float , bulk_modulus : float ) -> float:
    """Return the speed of sound in a fluid: c = sqrt(bulk_modulus / density).

    Bug fixed: the obfuscation pass renamed both parameters to the same
    identifier (``_UpperCamelCase`` — a SyntaxError) while the body read the
    original names ``density`` and ``bulk_modulus``; the distinct names are
    restored from those reads. Positional call compatibility is preserved.

    Raises ValueError for non-positive density or bulk modulus.

    >>> SCREAMING_SNAKE_CASE__(1, 4)
    2.0
    """
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and pretrained-config archive map.
# NOTE(review): both targets were renamed to ``__lowercase`` by the obfuscation
# pass (the second assignment shadows the first); originally these were
# ``logger`` and the YOLOS pretrained-config map.
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Configuration class for YOLOS (``model_type = 'yolos'``).

    NOTE(review): the base class name was obfuscated (presumably
    ``PretrainedConfig``) and all ``__init__`` parameters were collapsed to
    ``a_`` (a SyntaxError); the right-hand sides of the assignments below
    reveal the original parameter names. The assignment targets (all mangled
    to ``UpperCamelCase__``) were originally ``self.<name>``.
    """

    _lowerCamelCase : List[str] = 'yolos'  # model_type identifier

    def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
        """Store transformer, detection-head, matcher and loss hyperparameters."""
        super().__init__(**a_ )
        # Transformer encoder hyperparameters
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        # Image / patch embedding parameters
        UpperCamelCase__ = image_size
        UpperCamelCase__ = patch_size
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = qkv_bias
        # Detection-specific parameters
        UpperCamelCase__ = num_detection_tokens
        UpperCamelCase__ = use_mid_position_embeddings
        UpperCamelCase__ = auxiliary_loss
        # Hungarian matcher
        UpperCamelCase__ = class_cost
        UpperCamelCase__ = bbox_cost
        UpperCamelCase__ = giou_cost
        # Loss coefficients
        UpperCamelCase__ = bbox_loss_coefficient
        UpperCamelCase__ = giou_loss_coefficient
        UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """ONNX export configuration for YOLOS (base class presumably ``OnnxConfig``).

    NOTE(review): the three properties were all mangled to ``lowercase_``;
    from their bodies they are the ONNX ``inputs`` axis spec, the validation
    absolute tolerance, and the default ONNX opset — confirm original names.
    """

    _lowerCamelCase : Union[str, Any] = version.parse('1.11')  # minimum torch version for export

    @property
    def lowercase_ ( self : str ):
        """Input axis specification: pixel_values is (batch, num_channels, height, width)."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def lowercase_ ( self : Tuple ):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def lowercase_ ( self : Optional[int] ):
        """Default ONNX opset version."""
        return 12
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and pretrained-config archive map for MGP-STR.
# NOTE(review): both targets were mangled to ``__lowercase`` (the second
# shadows the first); originally ``logger`` and the pretrained-config map.
__lowercase: Optional[Any] = logging.get_logger(__name__)
__lowercase: List[Any] = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Configuration class for MGP-STR scene-text recognition (``model_type = 'mgp-str'``).

    NOTE(review): the base class name was obfuscated (presumably
    ``PretrainedConfig``) and all ``__init__`` parameters were collapsed to
    ``a_`` (a SyntaxError); the right-hand sides of the assignments reveal
    the original parameter names, with targets originally ``self.<name>``.
    """

    _lowerCamelCase : List[Any] = 'mgp-str'  # model_type identifier

    def __init__( self : Optional[int], a_ : Tuple=[32, 128], a_ : str=4, a_ : int=3, a_ : Optional[int]=27, a_ : str=38, a_ : Dict=5_0257, a_ : List[Any]=3_0522, a_ : Tuple=768, a_ : Union[str, Any]=12, a_ : int=12, a_ : Optional[Any]=4.0, a_ : str=True, a_ : Union[str, Any]=False, a_ : int=1e-5, a_ : Any=0.0, a_ : Dict=0.0, a_ : List[str]=0.0, a_ : Any=False, a_ : Tuple=0.02, **a_ : Tuple, ):
        """Store vision-transformer and multi-granularity label-head hyperparameters."""
        super().__init__(**a_ )
        # Image / patch embedding parameters
        UpperCamelCase__ = image_size
        UpperCamelCase__ = patch_size
        UpperCamelCase__ = num_channels
        # Output head sizes (character / BPE / wordpiece granularities)
        UpperCamelCase__ = max_token_length
        UpperCamelCase__ = num_character_labels
        UpperCamelCase__ = num_bpe_labels
        UpperCamelCase__ = num_wordpiece_labels
        # Transformer encoder hyperparameters
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = mlp_ratio
        UpperCamelCase__ = distilled
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = drop_rate
        UpperCamelCase__ = qkv_bias
        UpperCamelCase__ = attn_drop_rate
        UpperCamelCase__ = drop_path_rate
        UpperCamelCase__ = output_aa_attentions
        UpperCamelCase__ = initializer_range
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)  # NOTE(review): originally ``logger``, which the functions below read
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict.

    NOTE(review): obfuscation collapsed all parameters to ``_UpperCamelCase``
    (a SyntaxError) and mangled assignment targets to ``UpperCamelCase__``
    while reads keep the original names (``is_sharded``, ``pt_path``,
    ``pt_state_dict``, ``flax_state_dict``) — not runnable as written.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
        logger.info(F'Loading PyTorch weights from {pt_path}' )
        # NOTE(review): torch.load on an untrusted checkpoint can execute arbitrary code (pickle)
        UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
        logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
        UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
    return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
    """Rename a PyTorch weight key tuple to Flax conventions and reshape the tensor.

    Handles layer norm ("weight"/"gamma" -> "scale"), batch-norm statistics
    ("running_mean"/"running_var" -> "mean"/"var"), embeddings, conv kernels
    (NCHW weight transposed to HWIO "kernel"), linear kernels (transposed),
    old "gamma"/"beta" names, and the post-#24030 ``weight_norm``
    parametrization keys.

    NOTE(review): obfuscation collapsed the parameters to ``_UpperCamelCase``
    (originally something like ``pt_tuple_key, pt_tensor, random_flax_state_dict,
    model_prefix``) and mangled local assignment targets, so reads such as
    ``renamed_pt_tuple_key`` and ``name`` are unbound as written.
    """
    def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
        # True when the candidate key exists in the Flax dict, with or without the model prefix.
        return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        # PyTorch conv weight (O, I, H, W) -> Flax kernel (H, W, I, O)
        UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
        UpperCamelCase__ = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    UpperCamelCase__ = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        UpperCamelCase__ = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        UpperCamelCase__ = pt_tuple_key[-2] + "_v"
    if name is not None:
        UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
    """Convert a full (non-sharded) PyTorch state dict into a nested Flax params dict.

    Renames every key via the rename/reshape helper, validates shapes against
    the randomly-initialized Flax params, and routes batch-norm statistics
    into ``batch_stats`` while dropping ``num_batches_tracked``.

    NOTE(review): obfuscation collapsed both parameters to ``_UpperCamelCase``
    (originally ``pt_state_dict, flax_model``) and mangled local assignment
    targets, so reads like ``model_prefix`` and ``flax_state_dict`` are
    unbound as written.
    """
    UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
    UpperCamelCase__ = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        UpperCamelCase__ = flax_model.params["params"]
    else:
        UpperCamelCase__ = flax_model.params
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(_UpperCamelCase )
    UpperCamelCase__ = {}
    # Head checkpoint -> base model, and base checkpoint -> model-with-head detection.
    UpperCamelCase__ = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    UpperCamelCase__ = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCamelCase__ = tuple(pt_key.split("." ) )
        # remove base model prefix if necessary
        UpperCamelCase__ = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = pt_tuple_key[1:]
        # Correctly rename weight parameters
        UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        # add model prefix if necessary
        UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
                continue
            # also add unexpected weight so that warning is thrown
            UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
        else:
            # also add unexpected weight so that warning is thrown
            UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
    return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
    """Convert a sharded PyTorch checkpoint (list of .pt shard files) into a Flax params dict.

    Same key renaming/shape validation as the non-sharded converter, applied
    shard by shard and accumulated into one flattened dict.

    NOTE(review): obfuscation collapsed both parameters to ``_UpperCamelCase``
    (originally ``shard_filenames, flax_model``) and mangled local assignment
    targets, so reads like ``pt_state_dict`` and ``flax_state_dict`` are
    unbound as written.
    """
    import torch

    # Load the index
    UpperCamelCase__ = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        UpperCamelCase__ = torch.load(_UpperCamelCase )
        UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
        UpperCamelCase__ = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            UpperCamelCase__ = flax_model.params["params"]
            UpperCamelCase__ = flatten_dict(_UpperCamelCase )
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
        else:
            UpperCamelCase__ = flax_model.params
            UpperCamelCase__ = flatten_dict(_UpperCamelCase )
        # Head checkpoint -> base model, and base checkpoint -> model-with-head detection.
        UpperCamelCase__ = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        UpperCamelCase__ = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            UpperCamelCase__ = tuple(pt_key.split("." ) )
            # remove base model prefix if necessary
            UpperCamelCase__ = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                UpperCamelCase__ = pt_tuple_key[1:]
            # Correctly rename weight parameters
            UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            # add model prefix if necessary
            UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                UpperCamelCase__ = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                    continue
                if "var" in flax_key[-1]:
                    UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
                    continue
                # also add unexpected weight so that warning is thrown
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
            else:
                # also add unexpected weight so that warning is thrown
                UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
    return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
    """Load a serialized Flax checkpoint file into a PyTorch model.

    Locates the matching ``Flax<ModelName>`` class, deserializes the msgpack
    weights, then delegates to ``load_flax_weights_in_pytorch_model``.

    NOTE(review): obfuscation collapsed both parameters to ``_UpperCamelCase``
    (originally ``model, flax_checkpoint_path``) and mangled assignment
    targets, so reads like ``flax_checkpoint_path``/``flax_state_dict`` are
    unbound as written; the getattr target was presumably ``transformers``.
    """
    UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
    logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
    # import correct flax class
    UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
    # load flax weight dict
    with open(_UpperCamelCase , "rb" ) as state_f:
        try:
            UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
    """Load a Flax state dict into a PyTorch model, logging unexpected/missing keys.

    Casts bf16 Flax weights to fp32 (torch.from_numpy cannot handle bf16),
    renames Flax keys to PyTorch conventions (kernel/scale/embedding,
    batch-norm statistics, ``weight_norm`` parametrizations), validates
    shapes, and calls ``pt_model.load_state_dict``.

    NOTE(review): obfuscation collapsed both parameters to ``_UpperCamelCase``
    (originally ``pt_model, flax_state``) and mangled assignment targets, so
    reads like ``flax_state``, ``pt_model_dict``, ``missing_keys`` are
    unbound as written; the lambdas also read ``x``/``params`` instead of
    their mangled argument.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
    if any(_UpperCamelCase ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
    UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    UpperCamelCase__ = pt_model.state_dict()
    # Head checkpoint -> base model, and base checkpoint -> model-with-head detection.
    UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    UpperCamelCase__ = []
    UpperCamelCase__ = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCamelCase__ = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # conv layer: Flax HWIO kernel -> PyTorch OIHW weight
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
            # linear layer: kernel is the transpose of the PyTorch weight
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
            UpperCamelCase__ = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            UpperCamelCase__ = ".".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            UpperCamelCase__ = ".".join(_UpperCamelCase )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCamelCase__ = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCamelCase__ = key.split("." )
            UpperCamelCase__ = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCamelCase__ = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCamelCase__ = key_components[-2] + "_v"
            if name is not None:
                UpperCamelCase__ = key_components[:-3] + [name]
                UpperCamelCase__ = ".".join(_UpperCamelCase )
                UpperCamelCase__ = key
        if flax_key in special_pt_names:
            UpperCamelCase__ = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
                UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
                # remove from missing keys
                missing_keys.remove(_UpperCamelCase )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(_UpperCamelCase )
    pt_model.load_state_dict(_UpperCamelCase )
    # re-transform missing_keys to list
    UpperCamelCase__ = list(_UpperCamelCase )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
    if len(_UpperCamelCase ) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            " use it for predictions and inference." )
    else:
        logger.warning(
            F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            "If your task is similar to the task the model of the checkpoint was trained on, "
            F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase: Dict = get_tests_dir("fixtures/spiece.model")  # NOTE(review): originally ``SAMPLE_VOCAB``, which the tests below need
# Tokenizer test suite for ALBERT (slow + fast tokenizers) built on the
# shared tokenizer tester mixin.
# NOTE(review): the mixin base-class name was obfuscated to
# SCREAMING_SNAKE_CASE__ (presumably TokenizerTesterMixin), and the class
# attributes were all renamed to ``_lowerCamelCase`` so the later
# assignments shadow the earlier ones; originally they configured
# tokenizer_class, rust_tokenizer_class, and the test_* feature flags.
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    _lowerCamelCase : Union[str, Any] = AlbertTokenizer
    _lowerCamelCase : str = AlbertTokenizerFast
    _lowerCamelCase : Tuple = True
    _lowerCamelCase : List[str] = True
    _lowerCamelCase : List[str] = True
    def lowercase_ ( self : Any ):
        """Build an Albert tokenizer from the SentencePiece fixture and save it to tmpdirname."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCamelCase__ = AlbertTokenizer(a_ )  # NOTE(review): obfuscation dropped the fixture path (likely SAMPLE_VOCAB); ``tokenizer`` below is unbound
        tokenizer.save_pretrained(self.tmpdirname )
    def lowercase_ ( self : List[Any], a_ : Optional[Any] ):
        """Provide the (input_text, output_text) pair used by the shared tokenizer tests."""
        UpperCamelCase__ = "this is a test"
        UpperCamelCase__ = "this is a test"
        return input_text, output_text
    def lowercase_ ( self : List[Any] ):
        """Check token<->id conversion for the '<pad>' token (id 0)."""
        UpperCamelCase__ = "<pad>"
        UpperCamelCase__ = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
    def lowercase_ ( self : str ):
        """Check the first/last vocab entries and the vocab size (30000)."""
        UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], "<pad>" )
        self.assertEqual(vocab_keys[1], "<unk>" )
        self.assertEqual(vocab_keys[-1], "▁eloquent" )
        self.assertEqual(len(a_ ), 3_0000 )
    def lowercase_ ( self : Tuple ):
        """Check the reported vocab_size property."""
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000 )
    def lowercase_ ( self : int ):
        """Check that the Python and Rust tokenizers agree on tokenize/encode output."""
        if not self.test_rust_tokenizer:
            return
        UpperCamelCase__ = self.get_tokenizer()
        UpperCamelCase__ = self.get_rust_tokenizer()
        UpperCamelCase__ = "I was born in 92000, and this is falsé."
        UpperCamelCase__ = tokenizer.tokenize(a_ )
        UpperCamelCase__ = rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_, a_ )
        UpperCamelCase__ = tokenizer.encode(a_, add_special_tokens=a_ )
        UpperCamelCase__ = rust_tokenizer.encode(a_, add_special_tokens=a_ )
        self.assertListEqual(a_, a_ )
        UpperCamelCase__ = self.get_rust_tokenizer()
        UpperCamelCase__ = tokenizer.encode(a_ )
        UpperCamelCase__ = rust_tokenizer.encode(a_ )
        self.assertListEqual(a_, a_ )
    def lowercase_ ( self : Optional[Any] ):
        """Check full tokenization: pieces, ids, and id->token round-trip (with <unk> for 'é')."""
        UpperCamelCase__ = AlbertTokenizer(a_, keep_accents=a_ )  # NOTE(review): fixture path argument dropped by obfuscation
        UpperCamelCase__ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a_, ["▁this", "▁is", "▁a", "▁test"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [48, 25, 21, 1289] )
        UpperCamelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a_, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        UpperCamelCase__ = tokenizer.convert_tokens_to_ids(a_ )
        self.assertListEqual(a_, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        UpperCamelCase__ = tokenizer.convert_ids_to_tokens(a_ )
        self.assertListEqual(
            a_, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer(a_ )
UpperCamelCase__ = tokenizer.encode("sequence builders" )
UpperCamelCase__ = tokenizer.encode("multi-sequence build" )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a_, a_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
| 367
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a valid issuer prefix (Amex/JCB/Visa/Mastercard/Discover)."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the number passes the Luhn checksum.

    Every second digit from the right is doubled; two-digit products are
    reduced by summing their digits (equivalently digit % 10 + 1).
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling yields a two-digit number (e.g. 6 x 2 = 12), add its
        # digits (12 -> 1 + 2 = 3), i.e. digit % 10 + 1.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining (undoubled) digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate a credit card number: digits only, length 13-16, known prefix, Luhn check.

    NOTE: all three functions were previously defined under the same name
    (shadowing each other) while these call sites used the original names;
    the definitions are restored to the names actually called.
    """
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs/inputs and checks output shapes per task head.

    Restores the original class/method names: the test class below refers to
    `MPNetModelTester` and `create_and_check_*`, but the obfuscated code named
    everything `UpperCAmelCase`/`lowercase_` (undefined references, shadowing).
    Also restores `model.to(torch_device)` where `a_` was undefined.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        """Config of the published checkpoint (used by some shared tests)."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Random tiny inputs plus matching labels for every head."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny MPNet config driven by the tester's hyperparameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile each example across the choice dimension: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shared-mixin test suite for MPNet.

    Restores names destroyed by obfuscation: all six test methods were named
    `lowercase_` (shadowing each other, never discovered by unittest), the
    class attributes were all `_lowerCamelCase` (only the last survived), and
    `config_class=a_` / `*a_` referenced undefined names.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published microsoft/mpnet-base weights."""

    @slow
    def test_inference_no_head(self):
        # Fixes obfuscated code: `model(a_)`, `assertEqual(..., a_)` and
        # `allclose(..., a_)` referenced an undefined name.
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
    """Solve the mass-action law n * p = n_i^2 for the single value given as 0.

    Exactly one of the three concentrations must be 0; that one is computed
    from the other two and returned as ("name", value).

    Fixes a SyntaxError in the obfuscated code: all three parameters were
    named `_UpperCamelCase` while the body used the real names restored here.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError("You cannot supply more or less than 2 values" )
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor" )
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor" )
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor" )
    elif electron_conc == 0:
        # n = n_i^2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i^2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original TF OpenAI GPT checkpoint to a PyTorch model + config.

    Fixes obfuscated code: all three parameters shared the name
    `_UpperCamelCase` (SyntaxError) while the body and the `__main__` block
    used the names restored here.
    """
    # Construct the model config: default if no JSON file was supplied.
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/config/output paths and convert.
    __lowercase: Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    # NOTE(review): `parser` below presumably refers to the ArgumentParser bound
    # to the obfuscated name `__lowercase` above — confirm the original binding.
    __lowercase: int = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> list of public names it provides.
# Fixes obfuscated code: `_import_structure` was referenced at the bottom but the
# dict was bound to `__lowercase` and then overwritten by the modeling list.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply don't expose the modeling objects.
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> list of public names it provides.
# Fixes obfuscated code: `_import_structure` was referenced at the bottom but
# every backend list was bound to `__lowercase` instead of updating the dict.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search: smallest index in (l, r] with v[index] >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list) -> int:
    """Return the length of the longest strictly increasing subsequence of v.

    O(n log n) patience-style algorithm: `tail[k]` holds the smallest possible
    tail of an increasing subsequence of length k + 1.

    Fixes obfuscated code: every write into `tail` had been collapsed to a
    plain local assignment, so the array was never updated and the helper was
    never called.
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: restart a length-1 subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail >= v[i] to keep tails minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowercase: int = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    """Deprecated alias kept for backward compatibility; use BeitImageProcessor.

    Fixes obfuscated code: `__init__` declared both `*a_` and `**a_`
    (duplicate argument name = SyntaxError) and passed the kwargs dict as the
    warning category; the standard deprecation pattern passes FutureWarning.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
    """Dummy placeholder that raises a helpful error when torch/scipy are missing.

    Fixes obfuscated code: every method declared both `*a_` and `**a_`
    (duplicate argument name = SyntaxError).
    """

    # Backends required before the real object can be used.
    _lowerCamelCase : str = ['torch', 'scipy']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def lowercase_(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def lowercase_(cls, *args, **kwargs):  # noqa: F811 — mirrors the original duplicate classmethod slots
        requires_backends(cls, ["torch", "scipy"])
| 31
| 0
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint into a Hugging Face LukeForMaskedLM.

    Fixes obfuscated code: all five parameters shared one name (SyntaxError)
    and every local was bound to `lowercase` while later lines used the real
    names; names are restored from those uses and the __main__ call order.
    """
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Rewrite the tokenizer class so the saved files load as MLukeTokenizer.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from '@' and '#'.
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # NOTE(review): the three target keys below are reconstructed from the
    # upstream mLUKE conversion script — confirm against it.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # Tied/derived weights are re-created by the model itself.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 3_3, 7_6_8))
        expected_slice = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 7_6_8))
        expected_slice = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (2_4, 3_0)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Parse the original mLUKE JSON-lines entity vocab into a name -> id mapping.

    Special tokens map under their own name; every other entity maps as
    "language:name". Fixes obfuscated code: mapping writes had collapsed to a
    plain local `lowercase =` and `SPECIAL_TOKENS` was undefined; also renamed
    to the name the converter actually calls.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are language-agnostic; skip the entry's aliases.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    # CLI entry point: collect checkpoint/metadata/vocab/output paths and convert.
    __lowerCAmelCase : str =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
    parser.add_argument(
        """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
    )
    parser.add_argument(
        """--entity_vocab_path""",
        default=None,
        type=str,
        help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
    )
    parser.add_argument(
        """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
    )
    # NOTE(review): `parser`/`args` presumably refer to the ArgumentParser and
    # parsed namespace bound to the obfuscated `__lowerCAmelCase` names — confirm.
    __lowerCAmelCase : Any =parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 32
|
"""simple docstring"""
# Backward-compat shim: emit a deprecation warning pointing users at the
# pipeline class exported directly by `diffusers`.
import warnings

# NOTE(review): `StableDiffusionImgaImgPipeline` looks like an obfuscated
# `StableDiffusionImg2ImgPipeline` — confirm against the diffusers API.
from diffusers import StableDiffusionImgaImgPipeline  # noqa F401

warnings.warn(
    """The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
    """ StableDiffusionImg2ImgPipeline` instead."""
)
| 32
| 1
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image):
    """Binarize a grayscale PIL image in place around its mean intensity.

    Pixels strictly above the mean become 255, all others 0; the (mutated)
    image is returned. Fixes obfuscated code: width/height were both bound to
    `lowercase` and the loops ranged over the function argument itself; also
    renamed to `mean_threshold`, the name the __main__ block calls.

    NOTE(review): assumes a single-channel ("L" mode) image whose pixel access
    object returns ints — confirm with callers.
    """
    width, height = image.size
    mean = 0
    pixels = image.load()

    # First pass: accumulate the mean intensity.
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    # Second pass: threshold every pixel against the mean.
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    # Binarize the grayscale input and save the result.
    # Fixes obfuscated code: the result was bound to `__lowerCAmelCase` while
    # the save call referenced `image`.
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 32
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for GPTSanJapaneseTokenizer.

    NOTE(review): the previous version derived from an undefined base name,
    defined every method as ``A__`` (so later definitions silently shadowed
    earlier ones) and bound every intermediate to ``lowercase`` while the
    assertions read names such as ``vocab_tokens`` or ``x_token_1``. The hook
    names below (``setUp``, ``get_tokenizer``, ...) are the ones
    TokenizerTesterMixin and the method bodies actually call.
    """

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp(self):
        """Write a tiny vocabulary and emoji table into the temp dir."""
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        """Build a tokenizer from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_1[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        text = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(text, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(text, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_token_type = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attention_mask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_token_type)
        self.assertListEqual(x_token.attention_mask, expected_attention_mask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_token_type)
        self.assertListEqual(x_token_2.attention_mask, expected_attention_mask)

    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
| 32
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
| 1
|
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str =logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Load the weights of a TensorFlow 2.x BERT checkpoint into *model*.

    Only the embedding/encoder/pooler layers are handled; checkpoints with
    MLM/NSP heads are rejected. Returns the mutated *model*.

    NOTE(review): the previous version used duplicate parameter names (a
    SyntaxError) and bound every intermediate to the same name; the signature
    below restores the call-site contract
    ``load_tfa_weights_in_bert(model, tf_checkpoint_path, config)``.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}')
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("""/""")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f'Skipping non-model layer {full_name}')
            continue
        if "optimizer" in full_name:
            logger.info(f'Skipping optimization layer {full_name}')
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("""layer_with_weights"""):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("""/""".join(name))
        arrays.append(array)
    logger.info(f'Read a total of {len(arrays):,} layers')

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth))})')
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
            """ heads.""")

    # convert layers
    logger.info("""Converting weights...""")
    for full_name, array in zip(names, arrays):
        name = full_name.split("""/""")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("""layer_with_weights"""):
                layer_num = int(m_name.split("""-""")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["""embeddings""", """LayerNorm"""])
                    pointer = getattr(pointer, """embeddings""")
                    pointer = getattr(pointer, """LayerNorm""")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["""encoder""", """layer""", str(layer_num - 4)])
                    pointer = getattr(pointer, """encoder""")
                    pointer = getattr(pointer, """layer""")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["""pooler""", """dense"""])
                    pointer = getattr(pointer, """pooler""")
                    pointer = getattr(pointer, """dense""")
            elif m_name == "embeddings":
                trace.append("""embeddings""")
                pointer = getattr(pointer, """embeddings""")
                # layer_num distinguishes the three embedding tables and is
                # carried over from the layer_with_weights branch above.
                if layer_num == 0:
                    trace.append("""word_embeddings""")
                    pointer = getattr(pointer, """word_embeddings""")
                elif layer_num == 1:
                    trace.append("""position_embeddings""")
                    pointer = getattr(pointer, """position_embeddings""")
                elif layer_num == 2:
                    trace.append("""token_type_embeddings""")
                    pointer = getattr(pointer, """token_type_embeddings""")
                else:
                    raise ValueError(f'Unknown embedding layer with name {full_name}')
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["""attention""", """self"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """self""")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["""attention""", """output""", """LayerNorm"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["""attention""", """output""", """dense"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["""output""", """dense"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["""output""", """LayerNorm"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_key_dense":
                # attention key
                trace.append("""key""")
                pointer = getattr(pointer, """key""")
            elif m_name == "_query_dense":
                # attention query
                trace.append("""query""")
                pointer = getattr(pointer, """query""")
            elif m_name == "_value_dense":
                # attention value
                trace.append("""value""")
                pointer = getattr(pointer, """value""")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["""intermediate""", """dense"""])
                pointer = getattr(pointer, """intermediate""")
                pointer = getattr(pointer, """dense""")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("""bias""")
                pointer = getattr(pointer, """bias""")
            elif m_name in ["kernel", "gamma"]:
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            else:
                logger.warning(f'Ignored {m_name}')
        # for certain layers reshape is necessary
        trace = """.""".join(trace)
        if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""", trace) or re.match(
            R"""(\S+)\.attention\.output\.dense\.weight""", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            # TF stores dense kernels transposed relative to PyTorch.
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                f' {array.shape}')
        logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}')
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Build a BertModel from *config_path*, fill it with the TF2 checkpoint
    weights and write the resulting PyTorch state dict to *pytorch_dump_path*.

    NOTE(review): the previous definition used duplicate parameter names (a
    SyntaxError) and a name that did not match the ``__main__`` call site;
    both are fixed here.
    """
    logger.info(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...')
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 32
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( AbstractDatasetReader ):
    """Dataset reader that materializes (or streams) a dataset from a PySpark
    DataFrame via the packaged ``Spark`` builder.

    NOTE(review): the previous version derived from an undefined base name,
    gave every ``__init__`` parameter the same name (a SyntaxError) and never
    assigned the attributes that ``read`` consumes; names restored below.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        """Return the dataset for ``self.split``, streaming if requested."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 32
| 1
|
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Logical XNOR: return 1 when both inputs are equal, else 0.

    Restored name/parameters: the previous definition used duplicate
    parameter names (a SyntaxError) and the rest of the module calls
    ``xnor_gate``.
    """
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    """Exercise all four input combinations of ``xnor_gate``."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 32
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 32
| 1
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """Parse the command-line arguments for the complexity fine-tuning run.

    NOTE(review): the previous version stored the parser under a throwaway
    name and then called ``parser.add_argument`` on an undefined name, and
    every ``type=`` was obfuscated; concrete types restored below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_ckpt""", type=str, default="""microsoft/unixcoder-base-nine""")
    parser.add_argument("""--num_epochs""", type=int, default=5)
    parser.add_argument("""--batch_size""", type=int, default=6)
    parser.add_argument("""--gradient_accumulation_steps""", type=int, default=1)
    parser.add_argument("""--freeze""", type=bool, default=True)
    parser.add_argument("""--learning_rate""", type=float, default=5e-4)
    parser.add_argument("""--seed""", type=int, default=0)
    parser.add_argument("""--lr_scheduler_type""", type=str, default="""cosine""")
    parser.add_argument("""--num_warmup_steps""", type=int, default=1_0)
    parser.add_argument("""--weight_decay""", type=float, default=0.01)
    parser.add_argument("""--output_dir""", type=str, default="""./results""")
    return parser.parse_args()
__lowerCAmelCase : Dict =load("""accuracy""")
def compute_metrics(eval_pred):
    """Compute accuracy for a ``(logits, labels)`` pair produced by the
    Trainer, using the module-level ``metric`` (evaluate's "accuracy")."""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    """Trainer callback that additionally evaluates on the *training* split
    whenever an evaluation is about to run, logging metrics under the
    ``train`` prefix. Named ``CustomCallback`` to match the instantiation in
    ``main`` (the previous class name did not)."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # Snapshot the control object before the extra evaluation so the
            # Trainer continues with unmodified control flow.
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="""train""")
            return control_copy
def main():
    """Fine-tune a sequence-classification model on codeparrot/codecomplex.

    NOTE(review): the previous version bound every intermediate to one
    throwaway name while later lines read ``args``/``tokenizer``/``model``
    etc., and called ``labels.straint`` (typo for ``str2int``); restored.
    """
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("""codeparrot/codecomplex""", split="""train""")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["""test"""].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            """train""": train_test["""train"""],
            """test""": test_validation["""train"""],
            """valid""": test_validation["""test"""],
        } )

    print("""Loading tokenizer and model""")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # Only train the classification head.
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["""train"""]["""complexity"""])))

    def tokenize(example):
        # Map one batch of examples to model inputs plus an integer label.
        inputs = tokenizer(example["""src"""], truncation=True, max_length=1_0_2_4)
        label = labels.str2int(example["""complexity"""])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["""train"""].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )

    print("""Training...""")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 32
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build (old key, new key) pairs mapping original TrOCR/DeiT encoder
    checkpoint names onto the HuggingFace ViT encoder names.

    *decoder_config* is accepted for signature symmetry with the call site
    but not consulted here. NOTE(review): the previous definition used
    duplicate parameter names (a SyntaxError).
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias'))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
            ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
            ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
            ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
            ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
            ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
        ] )
    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split each fused ``attn.qkv`` weight into separate query/key/value
    weights under the HuggingFace key layout (mutates *state_dict*).

    NOTE(review): the previous version used duplicate parameter names and
    discarded the three slices instead of storing them; destination keys
    restored below.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move the value stored under *old* to key *new* in *dct* (in place).

    Named ``rename_key`` to match the call in the checkpoint converter below.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Download a sample image matching the checkpoint flavour (handwritten
    vs printed/stage1) and return it converted to RGB."""
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url, stream=True).raw).convert("""RGB""")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert an original TrOCR checkpoint into a HuggingFace
    VisionEncoderDecoderModel and save model + processor.

    NOTE(review): the previous definition used duplicate parameter names (a
    SyntaxError) and bound every intermediate to one throwaway name; the
    variable wiring below restores the intended data flow.
    """
    encoder_config = ViTConfig(image_size=3_8_4, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_0_2_4
        encoder_config.intermediate_size = 4_0_9_6
        encoder_config.num_hidden_layers = 2_4
        encoder_config.num_attention_heads = 1_6
        decoder_config.encoder_hidden_size = 1_0_2_4
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""", check_hash=True)["""model"""]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("""decoder""") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="""pt""").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_0_2_6_5])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 32
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
    """Return the number of set bits (1-bits) in the binary form of an integer.

    Args:
        lowerCAmelCase__: value to inspect; must be a non-negative ``int``.

    Returns:
        The population count of the value.

    Raises:
        TypeError: if the argument is not an ``int``.
        ValueError: if the argument is negative.

    >>> UpperCAmelCase__(25)
    3
    """
    # Validate the type first: the previous `isinstance(x, x)` compared the
    # value against itself instead of against `int`, which raised TypeError
    # ("arg 2 must be a type") for every otherwise-valid call.
    if not isinstance(lowerCAmelCase__, int):
        raise TypeError("""Input value must be a 'int' type""")
    if lowerCAmelCase__ < 0:
        raise ValueError("""Input value must be a positive integer""")
    return bin(lowerCAmelCase__).count("""1""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
    """Return True if the argument is a perfect cube, else False.

    Fixes two defects: the body referenced an undefined name ``n`` instead of
    the parameter (NameError on every call), and the float cube root was
    compared without rounding — e.g. 27 ** (1/3) == 3.0000000000000004, whose
    cube is not exactly 27, so 27 was reported as not a cube.

    Negative inputs are handled via the magnitude: -27 == (-3) ** 3.
    """
    magnitude = abs(lowerCAmelCase__)
    # Round the float cube root, then verify exactly in integer arithmetic;
    # also test the neighbours to guard against float rounding on big inputs.
    root = round(magnitude ** (1 / 3))
    return any(cand >= 0 and cand**3 == magnitude for cand in (root - 1, root, root + 1))


if __name__ == "__main__":
    # The previous guard called an undefined name `perfect_cube`.
    print(UpperCAmelCase__(2_7))
    print(UpperCAmelCase__(4))
| 32
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 1_0_0_0 ) -> int:
    """Sum ``2 * a * ((a - 1) // 2)`` for every a from 3 up to the limit, inclusive.

    The previous body referenced an undefined ``n`` where the parameter was
    meant, raising NameError on every call; the parameter is used now.

    Args:
        lowerCAmelCase__: inclusive upper bound of the summation (default 1000).

    Returns:
        The accumulated sum (0 when the limit is below 3).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, lowerCAmelCase__ + 1))


if __name__ == "__main__":
    # The previous guard called an undefined name `solution`.
    print(UpperCAmelCase__())
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``KandinskyInpaintPipeline``.

    NOTE(review): this block has been through an automated identifier
    rewrite.  Every method is named ``A__`` (later definitions clobber the
    earlier properties at class-creation time), the inputs builder declares
    the same parameter name twice (a SyntaxError as written), and several
    bodies assign to a dead local ``lowercase`` while later lines read the
    pre-rewrite names (``text_encoder``, ``generator``, ...), which are
    unbound.  Comments below describe the evident intent; the code is left
    byte-for-byte as found.
    """

    # Pipeline under test plus the argument lists consumed by the shared
    # pipeline tester mixin (the mixin base class name was mangled).
    snake_case__ : str = KandinskyInpaintPipeline
    snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    snake_case__ : Optional[int] = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    snake_case__ : Tuple = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    snake_case__ : Dict = False

    @property
    def A__ ( self ):
        """Tiny embedding width (32); presumably the original ``text_embedder_hidden_size``."""
        return 32

    @property
    def A__ ( self ):
        """Tiny time-embedding input width (32); presumably ``time_input_dim``."""
        return 32

    @property
    def A__ ( self ):
        """Alias of ``time_input_dim``; presumably the UNet block width."""
        return self.time_input_dim

    @property
    def A__ ( self ):
        """Time embedding dimension: four times the time input width."""
        return self.time_input_dim * 4

    @property
    def A__ ( self ):
        """Cross-attention width (100); presumably ``cross_attention_dim``."""
        return 100

    @property
    def A__ ( self ):
        """Tiny random multilingual tokenizer fetched from the Hub."""
        lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer  # NOTE(review): unbound -- the assignment above went to `lowercase`

    @property
    def A__ ( self ):
        """Seeded tiny MultilingualCLIP text encoder in eval mode."""
        torch.manual_seed(0 )
        lowercase = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        lowercase = MultilingualCLIP(__lowerCAmelCase )  # NOTE(review): `__lowerCAmelCase` is unbound here
        lowercase = text_encoder.eval()  # NOTE(review): `text_encoder` is unbound here
        return text_encoder

    @property
    def A__ ( self ):
        """Seeded tiny conditional UNet configured for 9-channel inpainting input."""
        torch.manual_seed(0 )
        lowercase = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        lowercase = UNetaDConditionModel(**__lowerCAmelCase )  # NOTE(review): unbound argument name
        return model  # NOTE(review): `model` is unbound; the model above went to `lowercase`

    @property
    def A__ ( self ):
        """Constructor kwargs for a tiny VQ (movq) autoencoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def A__ ( self ):
        """Seeded tiny ``VQModel`` built from the kwargs above."""
        torch.manual_seed(0 )
        lowercase = VQModel(**self.dummy_movq_kwargs )
        return model  # NOTE(review): unbound -- the model was bound to `lowercase`

    def A__ ( self ):
        """Assemble the component dict expected by ``KandinskyInpaintPipeline``."""
        lowercase = self.dummy_text_encoder
        lowercase = self.dummy_tokenizer
        lowercase = self.dummy_unet
        lowercase = self.dummy_movq
        # DDIM scheduler; the boolean arguments were mangled to `__lowerCAmelCase`.
        lowercase = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
        # NOTE(review): every value below refers to the pre-rewrite local names,
        # all of which were clobbered into `lowercase` above.
        lowercase = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
        """Build deterministic call kwargs (embeds, image, mask, generator).

        NOTE(review): both parameters carry the same name -- a SyntaxError as
        written; originally these were ``device`` and ``seed``.
        """
        lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
        # create init_image
        lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        lowercase = np.ones((64, 64) , dtype=np.floataa )
        lowercase = 0
        if str(__lowerCAmelCase ).startswith("""mps""" ):
            lowercase = torch.manual_seed(__lowerCAmelCase )
        else:
            lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        lowercase = {
            """prompt""": """horse""",
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def A__ ( self ):
        """End-to-end CPU smoke test: run the pipeline via both the dict and the
        tuple return paths and compare a 3x3 corner slice to golden values."""
        lowercase = """cpu"""
        lowercase = self.get_dummy_components()
        lowercase = self.pipeline_class(**__lowerCAmelCase )
        lowercase = pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
        lowercase = output.images
        lowercase = pipe(
            **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
        lowercase = image[0, -3:, -3:, -1]
        lowercase = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}' )
        assert image.shape == (1, 64, 64, 3)
        lowercase = np.array(
            [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def A__ ( self ):
        """Relax the batch-consistency tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky 2.1 prior + inpaint vs. a golden image.

    NOTE(review): same automated-rename damage as the fast tests -- both
    methods are named ``A__`` (the second definition clobbers the first, and
    neither matches unittest's ``test*`` discovery pattern), and the
    ``lowercase`` locals shadow each other while later lines read the
    pre-rewrite names.  Left byte-for-byte as found.
    """

    def A__ ( self ):
        """Free GPU memory between tests (evidently the original ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self ):
        """Run prior + inpaint end-to-end at 768x768 and compare to the golden npy."""
        lowercase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # full-ones mask (then one entry zeroed -- the write was mangled away)
        lowercase = np.ones((768, 768) , dtype=np.floataa )
        lowercase = 0
        lowercase = """a hat"""
        lowercase = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__lowerCAmelCase )
        lowercase = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
        lowercase = pipeline.to(__lowerCAmelCase )
        pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowercase , lowercase = pipe_prior(
            __lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        lowercase = pipeline(
            __lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        lowercase = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 32
| 1
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Canonical checkpoint -> config URL map; restored to its conventional name
# (the automated rewrite had bound it to `__lowerCAmelCase`, leaving it
# effectively unreachable by the rest of the package).
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/mask2former-swin-small-coco-instance""": (
        """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

# Bound to `logger` because the config class below calls `logger.info(...)`
# and `logger.warning_once(...)`; the mangled binding left `logger` undefined.
logger = logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
    """Configuration class for Mask2Former models.

    The automated rename had collapsed every class attribute onto
    ``snake_case__`` (so they clobbered one another) and every ``__init__``
    parameter onto ``__lowerCAmelCase`` (duplicate parameter names are a
    SyntaxError).  Names are restored from the attributes the methods read
    (``self.backbones_supported``, ``self.backbone_config``,
    ``self.__class__.model_type``, ...) and from the assignment order.
    """

    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}

    def __init__(
        self,
        backbone_config = None,
        feature_size = 256,
        mask_feature_size = 256,
        hidden_dim = 256,
        encoder_feedforward_dim = 1024,
        activation_function = "relu",
        encoder_layers = 6,
        decoder_layers = 10,
        num_attention_heads = 8,
        dropout = 0.0,
        dim_feedforward = 2048,
        pre_norm = False,
        enforce_input_projection = False,
        common_stride = 4,
        ignore_value = 255,
        num_queries = 100,
        no_object_weight = 0.1,
        class_weight = 2.0,
        mask_weight = 5.0,
        dice_weight = 5.0,
        train_num_points = 1_2544,
        oversample_ratio = 3.0,
        importance_sample_ratio = 0.7_5,
        init_std = 0.0_2,
        init_xavier_std = 1.0,
        use_auxiliary_loss = True,
        feature_strides = [4, 8, 16, 32],
        output_auxiliary_logits = None,
        **kwargs,
    ):
        """Build the config; defaults mirror the released Swin-backbone checkpoints.

        Args:
            backbone_config: backbone config object or dict; defaults to Swin.
            feature_size / mask_feature_size / hidden_dim: decoder widths.
            encoder_* / decoder_* / num_attention_heads / dropout / dim_feedforward:
                transformer-module hyperparameters.
            *_weight / train_num_points / oversample_ratio / importance_sample_ratio:
                loss and point-sampling settings.
            init_std / init_xavier_std: weight-initialization scales.
            feature_strides: strides of the backbone feature maps.
        """
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            backbone_config = CONFIG_MAPPING["""swin"""](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(backbone_config , dict ):
            # Accept a plain dict and rehydrate it into the right config class.
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # num_hidden_layers aliases decoder_layers for shared config machinery.
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config(cls , backbone_config , **kwargs ):
        """Instantiate a Mask2Former config from a backbone configuration."""
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict(self ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 32
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
    """Seq2seq-specific additions layered on top of ``TrainingArguments``.

    NOTE(review): every field name was rewritten to ``snake_case__``, so each
    annotated assignment overwrites the previous one and only the final field
    (the lr-scheduler selector) survives on the dataclass.  The metadata
    'help' strings still identify the intended fields: label smoothing,
    sortish sampler, generate-for-metrics, adafactor, encoder/decoder
    layerdrop, dropout, attention dropout, lr-scheduler choice.
    """

    snake_case__ : Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
    snake_case__ : bool = field(
        default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
    snake_case__ : Optional[float] = field(
        default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    snake_case__ : Optional[float] = field(
        default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    snake_case__ : Optional[float] = field(
        default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    snake_case__ : Optional[str] = field(
        default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazily-loaded public API of the SwiftFormer sub-package.  The automated
# rename had bound the structure dict and the lazy module to `__lowerCAmelCase`,
# leaving `_import_structure` (used in the `else:` branch below) unbound and
# never installing the lazy module into `sys.modules`; both are restored.
_import_structure = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols.
    pass
else:
    _import_structure["""modeling_swiftformer"""] = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> str:
    """Map a GroupViT checkpoint parameter name to its transformers equivalent.

    The previous version assigned every substitution to a dead local
    (``lowercase``) and returned the untouched input, so no key was ever
    renamed; the substitutions are now threaded through ``name``.  The order
    of the rules matters (e.g. the generic ``text_encoder`` rewrite must run
    after the more specific ``text_encoder.*`` rules).
    """
    name = lowerCAmelCase__
    # --- vision encoder ---
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""" )
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
    # --- text encoder ---
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""" )
    # --- projection layers ---
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
    return name
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
    """Rewrite a GroupViT state dict into transformers layout, splitting fused
    qkv / in_proj tensors into separate query, key and value slices.

    NOTE(review): corrupted by an automated rename -- the two parameters
    (evidently ``orig_state_dict`` and ``config``) share one name, which is a
    SyntaxError as written, and every slice assignment below binds a dead
    local ``lowercase`` instead of writing the renamed keys back into
    ``orig_state_dict``.  Documented as written; it cannot run in this form.
    """
    for key in orig_state_dict.copy().keys():
        lowercase = orig_state_dict.pop(lowerCAmelCase__ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            lowercase = key.split(""".""" )
            lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
            lowercase = config.vision_config.hidden_size
            if "weight" in key:
                # q / k / v rows of the fused weight matrix, in that order
                lowercase = val[:dim, :]
                lowercase = val[dim : dim * 2, :]
                lowercase = val[-dim:, :]
            else:
                lowercase = val[:dim]
                lowercase = val[dim : dim * 2]
                lowercase = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            lowercase = key.split(""".""" )
            lowercase = int(key_split[3] )
            lowercase = config.text_config.hidden_size
            if "weight" in key:
                lowercase = val[:dim, :]
                lowercase = val[
                    dim : dim * 2, :
                ]
                lowercase = val[-dim:, :]
            else:
                lowercase = val[:dim]
                lowercase = val[dim : dim * 2]
                lowercase = val[-dim:]
        else:
            lowercase = rename_key(lowerCAmelCase__ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                lowercase = val.squeeze_()
            else:
                lowercase = val
    return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
    """Download and return the standard COCO test image (two cats on a couch).

    The previous body passed an undefined name to ``requests.get`` (the
    function takes no parameters) and bound the opened image to a dead local
    while returning ``im``; the URL is now a proper local and ``im`` is bound.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # stream=True lets PIL read straight from the response's raw file object.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
    """Convert a GroupViT fairseq checkpoint to transformers format, verify the
    logits against golden values, save, and optionally push to the Hub.

    NOTE(review): corrupted by an automated rename -- all four parameters
    (evidently ``checkpoint_path``, ``pytorch_dump_folder_path``,
    ``model_name``, ``push_to_hub``) share one name, which is a SyntaxError as
    written, and the ``lowercase`` bindings shadow each other while later
    lines read the pre-rewrite names (``model``, ``processor``, ``outputs``,
    ...).  Documented as written; it cannot run in this form.
    """
    lowercase = GroupViTConfig()
    lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
    # load the fairseq checkpoint and remap its keys into transformers layout
    lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
    lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
    lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
    # only position_ids may be missing, and only the extra logit scale spare
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
    # verify result
    lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
    lowercase = prepare_img()
    lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
    with torch.no_grad():
        lowercase = model(**lowerCAmelCase__ )
    # golden logits for the two released checkpoints
    if model_name == "groupvit-gcc-yfcc":
        lowercase = torch.tensor([[13.3_523, 6.3_629]] )
    elif model_name == "groupvit-gcc-redcaps":
        lowercase = torch.tensor([[16.1_873, 8.6_230]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
    processor.save_pretrained(lowerCAmelCase__ )
    model.save_pretrained(lowerCAmelCase__ )
    print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
        model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
    # The automated rename bound the parser and the parsed namespace to
    # `__lowerCAmelCase`, so `parser`/`args` below were unbound.  Also fixed:
    # the default model name was "groupvit-gccy-fcc", which is neither of the
    # two names the converter accepts and always raised ValueError.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
    )
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
    parser.add_argument(
        """--model_name""",
        default="""groupvit-gcc-yfcc""",
        type=str,
        help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
    )
    args = parser.parse_args()
    # NOTE: the converter itself was renamed to `UpperCAmelCase__`; as the last
    # definition of that name in this file, it is the checkpoint converter.
    UpperCAmelCase__(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 32
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__lowerCAmelCase : int =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
    """Deprecated alias of ``MobileViTImageProcessor`` kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        """Warn about the deprecation, then defer to the parent initializer.

        The previous signature declared ``*__lowerCAmelCase, **__lowerCAmelCase``
        (a duplicate parameter name -- a SyntaxError) and passed the positional
        args where ``warnings.warn`` expects the warning category; the standard
        ``FutureWarning`` category is supplied instead.
        """
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 32
|
"""simple docstring"""
class _A :
    """Flow network over an adjacency-matrix graph with pluggable max-flow executors.

    NOTE(review): damaged by an automated rename.  ``__init__`` declares the
    same parameter name three times and the normalizer twice (SyntaxErrors as
    written); the ``lowercase = ...`` lines were originally ``self.<attr> = ...``
    assignments (``source_index``, ``sink_index``, ``graph``,
    ``maximum_flow_algorithm``, ...), so the constructor stores almost no
    state; and all three plain methods share the name ``A__``, so only the
    last survives.  Documented as written.
    """

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Build the network from (graph, sources, sinks); parameter names were lost."""
        lowercase = None
        lowercase = None
        lowercase = graph
        self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
        lowercase = len(__lowerCAmelCase )
        lowercase = None

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Normalize sources/sinks: coerce scalars to lists and, when several
        sources or sinks exist, splice in a synthetic super-source/super-sink.
        """
        # NOTE(review): `x is int` tests identity against the type object and is
        # always False for actual values; `isinstance(sources, int)` was meant.
        if sources is int:
            lowercase = [sources]
        if sinks is int:
            lowercase = [sinks]
        if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
            return
        lowercase = sources[0]
        lowercase = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
            # capacity of the super-source edge: total outflow of all sources
            lowercase = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            # prepend a row/column for the super-source ...
            lowercase = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                lowercase = max_input_flow
            lowercase = 0
            # ... and append a row/column for the super-sink
            lowercase = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                lowercase = max_input_flow
            lowercase = size - 1

    def A__ ( self ):
        """Run the configured algorithm and return the maximum-flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""" )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def A__ ( self , __lowerCAmelCase ):
        """Instantiate the given executor class for this network (binding lost to the rename)."""
        lowercase = algorithm(self )
class _A :
    """Base executor: snapshots the network's state and runs ``_algorithm`` once.

    NOTE(review): the ``lowercase = ...`` lines were originally
    ``self.<attr> = ...`` (``verticies_count``, ``source_index``,
    ``sink_index``, ``graph``, ``executed``); as written the constructor
    stores nothing and ``execute`` never flips the executed flag.
    """

    def __init__( self , __lowerCAmelCase ):
        """Capture vertex count, endpoints and the (shared) graph reference."""
        lowercase = flow_network
        lowercase = flow_network.verticesCount
        lowercase = flow_network.sourceIndex
        lowercase = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        lowercase = flow_network.graph
        lowercase = False

    def A__ ( self ):
        """Run the algorithm exactly once (idempotent entry point)."""
        if not self.executed:
            self._algorithm()
            lowercase = True

    def A__ ( self ):
        """Template hook implemented by concrete executors."""
        pass
class _A ( lowerCAmelCase ):
    """Executor specialization exposing the computed maximum-flow value."""

    def __init__( self , __lowerCAmelCase ):
        super().__init__(__lowerCAmelCase )
        # use this to save your result
        lowercase = -1  # NOTE(review): originally `self.maximum_flow = -1`

    def A__ ( self ):
        """Return the flow computed by ``execute``; guard against early access."""
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""" )
        return self.maximum_flow
class _A ( lowerCAmelCase ):
    """Push-relabel (relabel-to-front) maximum-flow executor.

    NOTE(review): the constructor's ``lowercase = ...`` lines were evidently
    ``self.preflow / self.heights / self.excesses = ...`` -- the methods below
    read exactly those attributes.  All algorithm methods are named ``A__``
    and clobber one another, so the internal calls to ``self.process_vertex``
    / ``self.push`` / ``self.relabel`` cannot resolve as written, and the
    push method declares the same parameter name twice (a SyntaxError).
    """

    def __init__( self , __lowerCAmelCase ):
        super().__init__(__lowerCAmelCase )
        # preflow[u][v]: flow currently pushed on edge (u, v);
        # heights[u]: label used to decide admissible pushes;
        # excesses[u]: inflow minus outflow at u.
        lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
        lowercase = [0] * self.verticies_count
        lowercase = [0] * self.verticies_count

    def A__ ( self ):
        """Relabel-to-front main loop: saturate all source edges, then discharge
        vertices, moving any relabeled vertex back to the front of the list."""
        lowercase = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        lowercase = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        lowercase = 0
        while i < len(__lowerCAmelCase ):
            lowercase = vertices_list[i]
            lowercase = self.heights[vertex_index]
            self.process_vertex(__lowerCAmelCase )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
                lowercase = 0
            else:
                i += 1
        # the maximum flow equals the net outflow of the source
        lowercase = sum(self.preflow[self.source_index] )

    def A__ ( self , __lowerCAmelCase ):
        """Discharge a vertex: push to admissible neighbours, then relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(__lowerCAmelCase , __lowerCAmelCase )
            self.relabel(__lowerCAmelCase )

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Push min(excess, residual capacity) along the edge.

        NOTE(review): both parameters share one name -- a SyntaxError as
        written; evidently ``from_index`` and ``to_index``.
        """
        lowercase = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def A__ ( self , __lowerCAmelCase ):
        """Raise the vertex's height to one above its lowest admissible neighbour."""
        lowercase = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                lowercase = self.heights[to_index]
        if min_height is not None:
            lowercase = min_height + 1
if __name__ == "__main__":
    # Restore the bindings destroyed by the automated rename: the example
    # previously assigned everything to `__lowerCAmelCase`, so `graph`,
    # `entrances`, `exits`, `flow_network` and `maximum_flow` were unbound.
    # NOTE(review): the class names referenced here (`FlowNetwork`,
    # `PushRelabelExecutor`) were themselves mangled to `_A` above and still
    # need restoring there.
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(F"""maximum flow is {maximum_flow}""")
| 32
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = ['pixel_values']
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BILINEAR , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = size if size is not None else {"""shortest_edge""": 256}
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase = get_size_dict(__lowerCAmelCase )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = get_size_dict(__lowerCAmelCase )
return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase ):
"""simple docstring"""
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(__lowerCAmelCase )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
lowercase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
lowercase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
lowercase = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
| 32
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase = parser.parse_args()
return args
def UpperCAmelCase__ ( args ):
    """Resolve, connect to and initialize the TPU system described by `args`.

    Fix: the original's parameter had an obfuscated name while the body read
    `args`, so every call raised NameError; the parameter is renamed to match.

    Raises:
        RuntimeError: when no TPU can be resolved from the given options.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            # No explicit name: rely on the environment (e.g. Colab) to resolve.
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def UpperCAmelCase__ ( file_list ):
    """Sum the per-shard sample counts encoded in TFRecord file names.

    Each shard is expected to be named ``...-<shard>-<num_samples>.tfrecord``;
    the second number is that shard's sample count.

    Fix: the original iterated an undefined name and ran the regex over the
    wrong variable; it now iterates the parameter and parses each basename.
    """
    num_samples = 0
    for file in file_list:
        file_name = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", file_name).group(1)
        num_samples += int(sample_count)
    return num_samples
def UpperCAmelCase__ ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    """Build the tf.data input pipeline over a list of TFRecord shards.

    Fix: the original signature reused one name for all six parameters (a
    SyntaxError); the names are restored from the keyword arguments used at
    the call sites (`decode_fn=`, `mask_fn=`, `batch_size=`, `shuffle=`,
    `shuffle_buffer_size=`).

    Args:
        records: list of TFRecord file paths.
        decode_fn: per-record example parser.
        mask_fn: batch-level MLM masking function.
        batch_size: global batch size; incomplete batches are dropped.
        shuffle: whether to shuffle shards and samples (training mode).
        shuffle_buffer_size: sample shuffle buffer; required when `shuffle`.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    # Fixed-size batches are required on TPU, hence drop_remainder.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
    '''Train a masked language model on TPU (or CPU/GPU with --no_tpu):
    builds the distribution strategy, tokenizer/config/model, tf.data
    pipelines over TFRecord shards, then compiles, fits and saves the model.

    NOTE(review): the source is obfuscated — the parameter and most locals are
    bound to `lowercase`/`lowerCAmelCase__` while later lines read `args`,
    `tokenizer`, `strategy`, `model`, etc.; verify name wiring against the
    upstream script before running.
    '''
    if not args.no_tpu:
        lowercase = initialize_tpu(lowerCAmelCase__ )
        lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
    else:
        lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloataa:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
    lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
    # Model vocab must match the tokenizer's vocabulary size.
    lowercase = tokenizer.vocab_size
    lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
    lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
    lowercase = count_samples(lowerCAmelCase__ )
    lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    lowercase = steps_per_epoch * args.num_epochs
    with strategy.scope():
        lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        lowercase , lowercase = create_optimizer(
            num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
    def decode_fn(lowerCAmelCase__ :Any ):
        # Parse a single serialized tf.Example into fixed-length int features.
        lowercase = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    lowercase = DataCollatorForLanguageModeling(
        tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
    def mask_with_collator(lowerCAmelCase__ :Dict ):
        # TF really needs an isin() function
        # Build a mask of positions that must never be masked (padding + CLS/SEP).
        lowercase = (
            ~tf.cast(batch["""attention_mask"""] , tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        lowercase , lowercase = data_collator.tf_mask_tokens(
            batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
        return batch
    lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
    lowercase = prepare_dataset(
        lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
    lowercase = prepare_dataset(
        lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
    lowercase = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
    model.fit(
        lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    # NOTE(review): the functions defined above are named `UpperCAmelCase__`
    # (obfuscated), not `parse_args`/`main`, and `args` is bound to an
    # obfuscated name on the previous line — verify entry-point wiring.
    __lowerCAmelCase : Optional[int] =parse_args()
    main(args)
| 32
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class _A ( lowerCAmelCase ):
    """Configuration class for Switch Transformers, a sparse
    mixture-of-experts encoder-decoder model in the T5 family.

    NOTE(review): the `__init__` signature below repeats a single obfuscated
    parameter name for every argument (invalid Python) and the body reads
    original parameter names (`vocab_size`, `d_model`, ...) — verify the
    name mapping against the upstream configuration before use.
    """
    snake_case__ : Tuple = 'switch_transformers'
    snake_case__ : Tuple = ['past_key_values']
    snake_case__ : Tuple = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , __lowerCAmelCase=3_2128 , __lowerCAmelCase=768 , __lowerCAmelCase=64 , __lowerCAmelCase=2048 , __lowerCAmelCase=64 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=12 , __lowerCAmelCase=8 , __lowerCAmelCase=False , __lowerCAmelCase=0.0_1 , __lowerCAmelCase="float32" , __lowerCAmelCase=False , __lowerCAmelCase=32 , __lowerCAmelCase=128 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=0.0_0_1 , __lowerCAmelCase=0.0_0_1 , __lowerCAmelCase=1.0 , __lowerCAmelCase="relu" , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , **__lowerCAmelCase , ):
        """Store model dimensions, sparse-layer layout, router settings and
        activation configuration; validates `router_dtype` and
        `feed_forward_proj` before delegating to the base config."""
        lowercase = vocab_size
        lowercase = d_model
        lowercase = d_kv
        lowercase = d_ff
        lowercase = num_sparse_encoder_layers
        lowercase = num_layers
        lowercase = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        lowercase = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            lowercase = self.num_layers // self.num_sparse_encoder_layers
        else:
            lowercase = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            lowercase = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            lowercase = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        lowercase = num_heads
        lowercase = num_experts
        lowercase = expert_capacity
        lowercase = router_bias
        lowercase = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        lowercase = router_dtype
        lowercase = router_ignore_padding_tokens
        lowercase = relative_attention_num_buckets
        lowercase = relative_attention_max_distance
        lowercase = dropout_rate
        lowercase = layer_norm_epsilon
        lowercase = initializer_factor
        lowercase = feed_forward_proj
        lowercase = use_cache
        lowercase = add_router_probs
        lowercase = router_z_loss_coef
        lowercase = router_aux_loss_coef
        # Split e.g. "gated-gelu" into gating flag + activation name.
        lowercase = self.feed_forward_proj.split("""-""" )
        lowercase = act_info[-1]
        lowercase = act_info[0] == """gated"""
        # NOTE(review): `A and B or C` groups as `(A and B) or C` here — confirm
        # this is the intended validation grouping.
        if len(__lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(__lowerCAmelCase ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowercase = """gelu_new"""
        super().__init__(
            pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase , )
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the SwiftFormer sub-package: config symbols are
# always importable; modeling symbols are registered only when torch is
# available. NOTE(review): the structure dict and the model list below are
# assigned to obfuscated names, yet `_import_structure` is referenced in the
# `_LazyModule` call at the bottom — verify the binding against upstream.
__lowerCAmelCase : List[Any] ={
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply skip registering the modeling symbols.
    pass
else:
    __lowerCAmelCase : Optional[Any] =[
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers get real imports instead of the lazy module.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys
    # At runtime, replace this module with a lazy loader.
    __lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    """ConfigTester specialization for CvT: the config has no text modality,
    so the common-properties check only asserts vision attributes exist.

    Fix: the original class/method names were obfuscated (the call sites below
    use `TFCvtConfigTester` / `create_and_test_config_common_properties`), and
    the `hasattr` checks referenced an undefined name instead of the config.
    """

    def create_and_test_config_common_properties(self):
        """Instantiate a config from `inputs_dict` and assert CvT attributes."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    """Builds a small CvT configuration plus random inputs and provides the
    create-and-check helpers used by the model test suite.

    Fix: the original reused one obfuscated name for every `__init__`
    parameter (a SyntaxError) and named every method `A__` (later defs clobber
    earlier ones); names are restored from the visible call sites
    (`TFCvtModelTester(self)`, `prepare_config_and_inputs`, `get_config`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # List defaults mirror the upstream tester and are never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when `use_labels`) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a CvtConfig mirroring this tester's hyper-parameters."""
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the base model and check the final feature-map shape."""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each stage downsamples via its own patch size / stride / padding.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head and check the logits shape."""
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return the (config, inputs_dict) pair used by the shared mixin tests."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Test suite for the TensorFlow CvT model and its classification head.

    NOTE(review): obfuscated source — results are bound to `lowercase` while
    later lines read the original names (`model`, `inputs_dict`, ...), and
    nested defs repeat one parameter name; code kept byte-identical here.
    """
    # Model classes / pipeline mapping exercised by the shared mixin tests.
    snake_case__ : int = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    snake_case__ : Dict = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    # Shared-test features CvT does not support are switched off.
    snake_case__ : List[Any] = False
    snake_case__ : int = False
    snake_case__ : Tuple = False
    snake_case__ : Any = False
    snake_case__ : int = False
    def A__ ( self ):
        """Create the model tester and config tester used by the tests below."""
        lowercase = TFCvtModelTester(self )
        lowercase = TFCvtConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
    def A__ ( self ):
        """Run the standard configuration sanity checks."""
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="""Cvt does not output attentions""" )
    def A__ ( self ):
        """Skipped: CvT produces no attention outputs."""
        pass
    @unittest.skip(reason="""Cvt does not use inputs_embeds""" )
    def A__ ( self ):
        """Skipped: CvT takes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason="""Cvt does not support input and output embeddings""" )
    def A__ ( self ):
        """Skipped: CvT has no token embedding layers to test."""
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    def A__ ( self ):
        """Dataset-conversion mixin test; needs a GPU for grouped convolutions."""
        super().test_dataset_conversion()
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    @slow
    def A__ ( self ):
        """Keras-fit mixin test; needs a GPU for grouped convolutions."""
        super().test_keras_fit()
    @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
    def A__ ( self ):
        """Skipped: keras-fit under mixed_float16 fails on TF 2.12 + CUDA 11.8."""
        lowercase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
        tf.keras.mixed_precision.set_global_policy(__lowerCAmelCase )
        super().test_keras_fit()
        # Restore the default policy so later tests are unaffected.
        tf.keras.mixed_precision.set_global_policy("""float32""" )
    def A__ ( self ):
        """Check that every model's call signature starts with `pixel_values`."""
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = model_class(__lowerCAmelCase )
            lowercase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]
            lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
    def A__ ( self ):
        """Check hidden-state outputs: stage count and first-stage spatial shape."""
        def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            lowercase = model_class(__lowerCAmelCase )
            lowercase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            lowercase = outputs.hidden_states
            lowercase = len(self.model_tester.depth )
            self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def A__ ( self ):
        """Exercise the base model end-to-end via the tester."""
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def A__ ( self ):
        """Exercise the image-classification head via the tester."""
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
    @slow
    def A__ ( self ):
        """Load the first pretrained CvT checkpoint from the hub."""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = TFCvtModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ):
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the original bound the opened image to one variable but returned the
    undefined name `image`, raising NameError on every call. (The wrong
    `-> Tuple` return annotation is dropped; a PIL image is returned.)
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
    """Integration test: run a pretrained TF CvT classifier on a COCO image.

    NOTE(review): obfuscated source — results are bound to `lowercase` while
    later lines read `model`, `image_processor`, `outputs`; verify wiring.
    """
    @cached_property
    def A__ ( self ):
        """Image processor for the first pretrained CvT checkpoint."""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def A__ ( self ):
        """Forward a COCO image through the pretrained model, check logits."""
        lowercase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" )
        # forward pass
        lowercase = model(**__lowerCAmelCase )
        # verify the logits
        lowercase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        # Reference slice of the first three ImageNet logits.
        lowercase = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCAmelCase , atol=1E-4 ) )
| 32
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
    """Configuration class for Mask2Former (universal image segmentation).

    Wraps a (Swin) backbone config plus transformer-decoder, loss-weight and
    point-sampling hyper-parameters. NOTE(review): the `__init__` signature
    repeats one obfuscated parameter name for every argument (invalid Python)
    while the body reads the original names — verify against upstream.
    """
    snake_case__ : Dict = 'mask2former'
    # Backbone families this config accepts.
    snake_case__ : Union[str, Any] = ['swin']
    snake_case__ : Any = {'hidden_size': 'hidden_dim'}
    def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Validate/normalize the backbone config and store all model,
        loss-weight and point-sampling hyper-parameters."""
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            lowercase = CONFIG_MAPPING["""swin"""](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            # A plain dict was passed: rebuild the proper config class from it.
            lowercase = backbone_config.pop("""model_type""" )
            lowercase = CONFIG_MAPPING[backbone_model_type]
            lowercase = config_class.from_dict(__lowerCAmelCase )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        lowercase = backbone_config
        lowercase = feature_size
        lowercase = mask_feature_size
        lowercase = hidden_dim
        lowercase = encoder_feedforward_dim
        lowercase = activation_function
        lowercase = encoder_layers
        lowercase = decoder_layers
        lowercase = num_attention_heads
        lowercase = dropout
        lowercase = dim_feedforward
        lowercase = pre_norm
        lowercase = enforce_input_projection
        lowercase = common_stride
        lowercase = ignore_value
        lowercase = num_queries
        lowercase = no_object_weight
        lowercase = class_weight
        lowercase = mask_weight
        lowercase = dice_weight
        lowercase = train_num_points
        lowercase = oversample_ratio
        lowercase = importance_sample_ratio
        lowercase = init_std
        lowercase = init_xavier_std
        lowercase = use_auxiliary_loss
        lowercase = feature_strides
        lowercase = output_auxiliary_logits
        # NOTE(review): `decoder_layers` is consumed a second time here; the
        # obfuscation hides which attribute it sets — confirm against upstream.
        lowercase = decoder_layers
        super().__init__(**__lowerCAmelCase )
    @classmethod
    def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
        """Alternate constructor: build a config from a pre-built backbone config."""
        return cls(
            backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
    def A__ ( self ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        lowercase = copy.deepcopy(self.__dict__ )
        lowercase = self.backbone_config.to_dict()
        lowercase = self.__class__.model_type
        return output
| 32
| 1
|
"""simple docstring"""
from math import factorial, pi
def UpperCAmelCase__ ( theta: float , accuracy: int = 30 ) -> float:
    """Approximate sin(theta) with a truncated Maclaurin series.

    Fixes: the original reused one name for both parameters (a SyntaxError)
    and tested `isinstance(accuracy, accuracy)`, which raises TypeError on
    every call — the second argument of isinstance must be the type `int`.

    Args:
        theta: angle in radians.
        accuracy: number of series terms to sum (positive int).

    Raises:
        ValueError: if `theta` is not a real number or `accuracy` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta modulo 2*pi so large angles keep good precision.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def UpperCAmelCase__ ( theta: float , accuracy: int = 30 ) -> float:
    """Approximate cos(theta) with a truncated Maclaurin series.

    Fixes: the original reused one name for both parameters (a SyntaxError)
    and tested `isinstance(accuracy, accuracy)`, which raises TypeError on
    every call — the second argument of isinstance must be the type `int`.

    Args:
        theta: angle in radians.
        accuracy: number of series terms to sum (positive int).

    Raises:
        ValueError: if `theta` is not a real number or `accuracy` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta modulo 2*pi so large angles keep good precision.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): the demo calls `maclaurin_sin`/`maclaurin_cos`, but the
    # functions above are defined as `UpperCAmelCase__` — verify entry names.
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( s , old , new , occurrence ):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`.

    Fix: the original reused one name for all four parameters (a SyntaxError
    in Python); distinct names are restored from the positional call sites.
    """
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def UpperCAmelCase__ ( state_dict ):
    """Sum every parameter value in `state_dict`, skipping tensors whose key
    contains `encoder.embeddings` (used as a conversion sanity checksum).

    Fix: the original's parameter had an obfuscated name while the body read
    the undefined name `state_dict`, raising NameError on every call.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def UpperCAmelCase__ ( state_dict :dict ) -> dict:
    """Rename DALL-E dVAE checkpoint keys to the FLAVA image-codebook layout.

    Group blocks gain a ``.group.`` segment, residual paths gain ``.path.``,
    and the terse ``.w``/``.b`` suffixes become ``.weight``/``.bias``.
    Every tensor is cast to float32.

    Original body assigned every step to `lowercase` so the renames never
    accumulated and the returned `upgrade` dict was unbound; restored here.
    """
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.' , f'{group_key}.group.' )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        # `.w`/`.b` are only renamed at the very end of the key
        if key.endswith(""".w""" ):
            key = rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = rreplace(key , """.b""" , """.bias""" , 1 )
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
    """Convert a DALL-E dVAE encoder checkpoint into a FLAVA image codebook.

    NOTE(review): local names look mangled by an automated rewrite — values
    are bound to `lowercase` but read back under other names (`ckpt`,
    `encoder`, `config_path`, `hf_model`, `hf_state_dict`, ...). Verify
    against the original conversion script before running.
    """
    from dall_e import Encoder
    lowercase = Encoder()
    # load the checkpoint from disk when present, otherwise fetch by URL
    if os.path.exists(lowerCAmelCase__ ):
        lowercase = torch.load(lowerCAmelCase__ )
    else:
        lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
    if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        lowercase = ckpt.state_dict()
    encoder.load_state_dict(lowerCAmelCase__ )
    if config_path is not None:
        lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
    else:
        lowercase = FlavaImageCodebookConfig()
    lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
    lowercase = encoder.state_dict()
    lowercase = upgrade_state_dict(lowerCAmelCase__ )
    hf_model.load_state_dict(lowerCAmelCase__ )
    lowercase = hf_model.state_dict()
    lowercase = count_parameters(lowerCAmelCase__ )
    lowercase = count_parameters(lowerCAmelCase__ )
    # sanity check: parameter sums of source and converted model must agree
    assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(lowerCAmelCase__ )
    else:
        return hf_state_dict
# CLI entry point: parse paths and run the DALL-E -> FLAVA conversion.
# NOTE(review): the parser/args are bound to `__lowerCAmelCase` but used as
# `parser`/`args` — mangled names, verify against the original script.
if __name__ == "__main__":
    __lowerCAmelCase : Tuple =argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    __lowerCAmelCase : Any =parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
# Module-level logger plus TF's AUTOTUNE constant, used to size the
# parallelism of the tf.data input pipeline below.
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
    """Build and parse the CLI arguments for TPU masked-LM training.

    NOTE(review): the parser is bound to `lowercase` but the add_argument
    calls use `parser` — mangled names, verify against the original script.
    """
    lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
    parser.add_argument(
        """--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
    parser.add_argument(
        """--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
    parser.add_argument(
        """--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
    parser.add_argument(
        """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
    parser.add_argument(
        """--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
    parser.add_argument(
        """--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
    parser.add_argument(
        """--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
    parser.add_argument(
        """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
    parser.add_argument(
        """--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
    parser.add_argument(
        """--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
    parser.add_argument(
        """--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
    parser.add_argument(
        """--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
    parser.add_argument(
        """--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
    parser.add_argument(
        """--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
    parser.add_argument(
        """--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
    parser.add_argument(
        """--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
    parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
    parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
    lowercase = parser.parse_args()
    return args
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
    """Resolve, connect to and initialize the TPU cluster named in `args`.

    NOTE(review): the resolver is bound to `lowercase` but the function reads
    `args` and returns `tpu` — mangled names, verify against the original.
    """
    try:
        # explicit TPU name (e.g. on a TPU VM) vs. auto-detection (Colab)
        if args.tpu_name:
            lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
    tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
    tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
    return tpu
def UpperCAmelCase__ ( file_list :list ) -> int:
    """Total the per-shard sample counts encoded in tfrecord file names.

    Shard names look like ``...-<shard>-<count>.tfrecord``; the trailing
    number is that shard's sample count.

    Original body read the unbound name `file_list` and never accumulated
    into the returned total; restored here.
    """
    num_samples = 0
    for file in file_list:
        # only the basename carries the count; strip any gs:// directory part
        filename = file.split("""/""" )[-1]
        sample_count = re.search(R"""-\d+-(\d+)\.tfrecord""" , filename ).group(1 )
        num_samples += int(sample_count )
    return num_samples
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
    """Build a batched, decoded, masked tf.data pipeline from tfrecord shards.

    NOTE(review): every stage is bound to `lowercase` but later stages read
    `dataset`, `shuffle`, `decode_fn`, `mask_fn`, `batch_size` — mangled
    names; also reads the module-global `args`. Verify against the original.
    """
    lowercase = count_samples(lowerCAmelCase__ )
    lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
    if shuffle:
        # shuffle at the shard level first, then again at the sample level
        lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
    lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
    lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
    if shuffle:
        assert shuffle_buffer_size is not None
        lowercase = dataset.shuffle(args.shuffle_buffer_size )
    lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
    lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
    lowercase = dataset.prefetch(lowerCAmelCase__ )
    return dataset
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
    """Train a masked language model on TPU (or single GPU with --no_tpu).

    Sets up the distribution strategy, loads tokenizer/config, builds the
    train/eval tf.data pipelines from tfrecord shards, compiles the model
    and runs fit(), saving to args.output_dir at the end.

    NOTE(review): like the rest of this file, locals are bound to
    `lowercase` but read under their original names (`strategy`,
    `tokenizer`, `model`, ...) — verify against the original script.
    """
    if not args.no_tpu:
        lowercase = initialize_tpu(lowerCAmelCase__ )
        lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
    else:
        lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloataa:
        # bfloat16 is the recommended mixed-precision format on TPU
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
    lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
    lowercase = tokenizer.vocab_size
    lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
    lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
    lowercase = count_samples(lowerCAmelCase__ )
    lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    lowercase = steps_per_epoch * args.num_epochs
    with strategy.scope():
        lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        lowercase , lowercase = create_optimizer(
            num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
    def decode_fn(lowerCAmelCase__ :Any ):
        # parse a serialized tf.Example into fixed-length input_ids / attention_mask
        lowercase = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    lowercase = DataCollatorForLanguageModeling(
        tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
    def mask_with_collator(lowerCAmelCase__ :Dict ):
        # TF really needs an isin() function
        lowercase = (
            ~tf.cast(batch["""attention_mask"""] , tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        lowercase , lowercase = data_collator.tf_mask_tokens(
            batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
        return batch
    lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
    lowercase = prepare_dataset(
        lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
    lowercase = prepare_dataset(
        lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
    lowercase = []
    if args.hub_model_id:
        # push intermediate checkpoints to the Hub during training
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
    model.fit(
        lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
    model.save_pretrained(args.output_dir )
# Script entry point: parse CLI args and launch training.
# NOTE(review): result is bound to `__lowerCAmelCase` but `main(args)` reads
# `args` — mangled names.
if __name__ == "__main__":
    __lowerCAmelCase : Optional[int] =parse_args()
    main(args)
| 32
|
"""simple docstring"""
import enum
import shutil
import sys
# Terminal geometry (columns, rows) and the ANSI CSI suffix character for
# each cursor-movement direction (ESC [ <n> <char>).
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
    # Two-state enum (presumably cursor/selection direction).
    # NOTE(review): both members share the name `snake_case__`, which
    # enum.Enum rejects at class creation (duplicate member), and the
    # `Tuple`/`List` annotations are unimported — an automated rename
    # clobbered the original member names; restore them from the source repo.
    snake_case__ : Tuple = 0
    snake_case__ : List[str] = 1
def UpperCAmelCase__ ( content , end="" ) -> None:
    """Write ``content`` to stdout with no implicit newline.

    Original signature duplicated the parameter name (a SyntaxError) and the
    body read the unbound name `end`; restored here.
    """
    sys.stdout.write(str(content ) + end )
    # flush immediately so cursor-control escape sequences take effect now
    sys.stdout.flush()
def UpperCAmelCase__ ( content , color , end="" ) -> None:
    """Write ``content`` wrapped in the ANSI SGR escape for ``color``.

    Original signature triplicated the parameter name (a SyntaxError) and the
    body read the unbound names `color`/`content`; restored here.
    """
    forceWrite(f'\u001b[{color}m{content}\u001b[0m' , end )
def UpperCAmelCase__ ( ) -> Dict:
    """Return the cursor to the start of the current line (carriage return)."""
    forceWrite("""\r""" )
def UpperCAmelCase__ ( num_lines :int , direction :str ) -> None:
    """Move the cursor ``num_lines`` lines in ``direction`` (a key of
    CURSOR_TO_CHAR: "UP"/"DOWN"/"RIGHT"/"LEFT", case-insensitive).

    Original signature duplicated the parameter name (a SyntaxError) and the
    body read the unbound names `num_lines`/`direction`; restored here.
    """
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
    """Blank out the current terminal line and return the cursor to column 0."""
    forceWrite(""" """ * TERMINAL_WIDTH )
    reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
    """Draw a full-width horizontal rule on the current line."""
    reset_cursor()
    forceWrite("""-""" * TERMINAL_WIDTH )
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module table for the AltCLIP package: configs/processor are always
# importable, modeling classes only when torch is available.
# NOTE(review): the dict is bound to `__lowerCAmelCase` but `_LazyModule`
# below reads `_import_structure` — mangled name; `is_tokenizers_available`
# is imported but unused here.
__lowerCAmelCase : Dict ={
    """configuration_altclip""": [
        """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """AltCLIPConfig""",
        """AltCLIPTextConfig""",
        """AltCLIPVisionConfig""",
    ],
    """processing_altclip""": ["""AltCLIPProcessor"""],
}
# register the torch-dependent modeling entries only if torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Dict =[
        """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """AltCLIPPreTrainedModel""",
        """AltCLIPModel""",
        """AltCLIPTextModel""",
        """AltCLIPVisionModel""",
    ]
# static type checkers see real imports; at runtime a _LazyModule defers them
if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys
    __lowerCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( num :int ) -> int:
    """Return the largest number obtainable by deleting exactly one digit
    from ``abs(num)``.

    Raises TypeError for non-integer input.

    The original body called ``list()`` / ``pop()`` / ``join()`` on the raw
    integer argument instead of its digit string, so it crashed immediately;
    restored here.
    """
    if not isinstance(num , int ):
        raise TypeError("""only integers accepted as input""" )
    digits = str(abs(num ) )
    # one working copy of the digit list per deletable position
    candidates = [list(digits ) for _ in range(len(digits ) )]
    for index in range(len(digits ) ):
        candidates[index].pop(index )
    return max(
        int("".join(candidate ) ) for candidate in candidates )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 32
| 1
|
"""simple docstring"""
class _A :
    """Flow network over an adjacency (capacity) matrix, normalising multiple
    sources/sinks to a single super-source and super-sink.

    NOTE(review): locals/attributes are mangled — values are bound to
    `lowercase` but read back as `self.graph`, `self.source_index`,
    `self.maximum_flow_algorithm`, etc. Verify against the original.
    """
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        """Store the capacity matrix and normalise the source/sink lists."""
        lowercase = None
        lowercase = None
        lowercase = graph
        self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
        lowercase = len(__lowerCAmelCase )
        lowercase = None
    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Wrap scalar sources/sinks in lists and, when there are several,
        add a fake super-source/super-sink with enough capacity."""
        # NOTE(review): `sources is int` compares against the *type* object and
        # is never true for an int value — likely meant isinstance(sources, int).
        if sources is int:
            lowercase = [sources]
        if sinks is int:
            lowercase = [sinks]
        if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
            return
        lowercase = sources[0]
        lowercase = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
            # capacity of the fake edges: total outflow of all real sources
            lowercase = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            lowercase = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                lowercase = max_input_flow
            lowercase = 0
            lowercase = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                lowercase = max_input_flow
            lowercase = size - 1
    def A__ ( self ):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""" )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def A__ ( self , __lowerCAmelCase ):
        """Instantiate `algorithm` (an executor class) over this network."""
        lowercase = algorithm(self )
class _A :
    """Base executor: caches references to a flow network and runs the
    concrete `_algorithm` at most once.

    NOTE(review): attribute names are mangled (`lowercase` assignments read
    back as `self.executed`, `self.verticies_count`, ...) — verify.
    """
    def __init__( self , __lowerCAmelCase ):
        """Capture network dimensions and a reference to its graph."""
        lowercase = flow_network
        lowercase = flow_network.verticesCount
        lowercase = flow_network.sourceIndex
        lowercase = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        lowercase = flow_network.graph
        lowercase = False
    def A__ ( self ):
        """Run the algorithm once; subsequent calls are no-ops."""
        if not self.executed:
            self._algorithm()
            lowercase = True
    def A__ ( self ):
        """Hook for subclasses: the actual algorithm body."""
        pass
class _A ( lowerCAmelCase ):
    """Executor specialisation that records a maximum-flow result.

    NOTE(review): the base-class name `lowerCAmelCase` is a mangled
    reference to the executor base defined above — verify.
    """
    def __init__( self , __lowerCAmelCase ):
        """Initialise the base executor and the result sentinel (-1)."""
        super().__init__(__lowerCAmelCase )
        # use this to save your result
        lowercase = -1
    def A__ ( self ):
        """Return the computed maximum flow; requires execute() first."""
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""" )
        return self.maximum_flow
class _A ( lowerCAmelCase ):
    """Push-relabel maximum-flow executor (relabel-to-front selection rule).

    NOTE(review): locals/attributes are mangled throughout (`lowercase`
    assignments read back as `self.preflow`, `self.heights`,
    `self.excesses`, ...) — verify against the original implementation.
    """
    def __init__( self , __lowerCAmelCase ):
        """Allocate preflow matrix plus per-vertex height/excess arrays."""
        super().__init__(__lowerCAmelCase )
        lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
        lowercase = [0] * self.verticies_count
        lowercase = [0] * self.verticies_count
    def A__ ( self ):
        """Saturate source edges, then discharge vertices in
        relabel-to-front order until no excess remains."""
        lowercase = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        lowercase = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        lowercase = 0
        while i < len(__lowerCAmelCase ):
            lowercase = vertices_list[i]
            lowercase = self.heights[vertex_index]
            self.process_vertex(__lowerCAmelCase )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
                lowercase = 0
            else:
                i += 1
        # total flow = everything pushed out of the source
        lowercase = sum(self.preflow[self.source_index] )
    def A__ ( self , __lowerCAmelCase ):
        """Discharge one vertex: push along admissible edges, relabel when stuck."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(__lowerCAmelCase , __lowerCAmelCase )
            self.relabel(__lowerCAmelCase )
    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Push min(excess, residual capacity) along one edge."""
        lowercase = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def A__ ( self , __lowerCAmelCase ):
        """Raise a stuck vertex to one above its lowest admissible neighbour."""
        lowercase = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                lowercase = self.heights[to_index]
        if min_height is not None:
            lowercase = min_height + 1
# Demo: build a small 4-vertex network and compute its maximum flow with
# the push-relabel executor.
# NOTE(review): entrances/exits/graph are bound to `__lowerCAmelCase` but
# read as `graph`, `entrances`, `exits` — mangled names.
if __name__ == "__main__":
    __lowerCAmelCase : int =[0]
    __lowerCAmelCase : List[Any] =[3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    __lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    __lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    __lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
    print(F"""maximum flow is {maximum_flow}""")
| 32
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# Vertices of the initial equilateral triangle, repeated-first-vertex so the
# polyline is closed. NOTE(review): the list references VECTOR_1..3 but the
# arrays are bound to `__lowerCAmelCase` — mangled names.
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( initial_vectors :list , steps :int ) -> list:
    """Apply `iteration_step` to the polyline ``steps`` times and return the
    final vertex list (Koch-snowflake refinement).

    Original body never fed the result of one step into the next and
    returned the unbound name `vectors`; restored here.
    """
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def UpperCAmelCase__ ( vectors :list ) -> list:
    """One Koch refinement: replace every segment with four segments whose
    middle pair forms an outward-pointing 60-degree bump.

    Original body read the unbound names `vectors`/`new_vectors`; restored.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        # the bump: 1/3 point, rotated 1/3 segment, 2/3 point
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def UpperCAmelCase__ ( vector , angle_in_degrees :float ):
    """Rotate a 2-D vector counter-clockwise by ``angle_in_degrees``.

    Original body read unbound names for the angle and the rotation-matrix
    entries; restored here.
    """
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    # standard 2-D rotation matrix [[cos, -sin], [sin, cos]]
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
    """Render the snowflake polyline with matplotlib (equal axis aspect).

    NOTE(review): the axes object is bound to `lowercase` but read as
    `axes` — mangled name.
    """
    lowercase = plt.gca()
    axes.set_aspect("""equal""" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    lowercase , lowercase = zip(*lowerCAmelCase__ )
    plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
    plt.show()
# Demo: refine the initial triangle 5 times and plot the snowflake.
# NOTE(review): calls `iterate`/`plot` and reads `INITIAL_VECTORS` /
# `processed_vectors`, none of which match the mangled names above.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    __lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 32
| 1
|
"""simple docstring"""
import argparse
import struct
import unittest
class _A :
    """Pure-Python SHA-256: pads the input bytes, then runs the 64-round
    compression function over each 512-bit block.

    NOTE(review): locals/attributes are mangled — values are bound to
    `lowercase` but read back as `self.data`, `self.hashes`,
    `self.round_constants`, `self.preprocessed_data`, `self.hash`, etc.
    Verify against the original implementation before relying on it.
    """
    def __init__( self , __lowerCAmelCase ):
        """Hash `data` (bytes) eagerly on construction."""
        lowercase = data
        # Initialize hash values
        lowercase = [
            0X6a_09e_667,
            0Xbb_67a_e85,
            0X3c_6ef_372,
            0Xa5_4ff_53a,
            0X51_0e5_27f,
            0X9b_056_88c,
            0X1f_83d_9ab,
            0X5b_e0c_d19,
        ]
        # Initialize round constants
        lowercase = [
            0X42_8a2_f98,
            0X71_374_491,
            0Xb5_c0f_bcf,
            0Xe9_b5d_ba5,
            0X39_56c_25b,
            0X59_f11_1f1,
            0X92_3f8_2a4,
            0Xab_1c5_ed5,
            0Xd8_07a_a98,
            0X12_835_b01,
            0X24_318_5be,
            0X55_0c7_dc3,
            0X72_be5_d74,
            0X80_deb_1fe,
            0X9b_dc0_6a7,
            0Xc1_9bf_174,
            0Xe4_9b6_9c1,
            0Xef_be4_786,
            0X0f_c19_dc6,
            0X24_0ca_1cc,
            0X2d_e92_c6f,
            0X4a_748_4aa,
            0X5c_b0a_9dc,
            0X76_f98_8da,
            0X98_3e5_152,
            0Xa8_31c_66d,
            0Xb0_032_7c8,
            0Xbf_597_fc7,
            0Xc6_e00_bf3,
            0Xd5_a79_147,
            0X06_ca6_351,
            0X14_292_967,
            0X27_b70_a85,
            0X2e_1b2_138,
            0X4d_2c6_dfc,
            0X53_380_d13,
            0X65_0a7_354,
            0X76_6a0_abb,
            0X81_c2c_92e,
            0X92_722_c85,
            0Xa2_bfe_8a1,
            0Xa8_1a6_64b,
            0Xc2_4b8_b70,
            0Xc7_6c5_1a3,
            0Xd1_92e_819,
            0Xd6_990_624,
            0Xf4_0e3_585,
            0X10_6aa_070,
            0X19_a4c_116,
            0X1e_376_c08,
            0X27_487_74c,
            0X34_b0b_cb5,
            0X39_1c0_cb3,
            0X4e_d8a_a4a,
            0X5b_9cc_a4f,
            0X68_2e6_ff3,
            0X74_8f8_2ee,
            0X78_a56_36f,
            0X84_c87_814,
            0X8c_c70_208,
            0X90_bef_ffa,
            0Xa4_506_ceb,
            0Xbe_f9a_3f7,
            0Xc6_717_8f2,
        ]
        lowercase = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def A__ ( __lowerCAmelCase ):
        """Pad per FIPS 180-4: 0x80 terminator, zeros, then the original
        bit-length as a 64-bit big-endian integer."""
        lowercase = B"""\x80""" + (B"""\x00""" * (63 - (len(__lowerCAmelCase ) + 8) % 64))
        lowercase = struct.pack(""">Q""" , (len(__lowerCAmelCase ) * 8) )
        return data + padding + big_endian_integer
    def A__ ( self ):
        """Split into 64-byte blocks and run the compression rounds,
        producing the final hex digest."""
        lowercase = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            lowercase = list(struct.unpack(""">16L""" , __lowerCAmelCase ) )
            # add 48 0-ed integers
            words += [0] * 48
            lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    # (message-schedule expansion with sigma0/sigma1)
                    lowercase = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    lowercase = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    lowercase = (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0X100_000_000
                # Compression
                lowercase = self.ror(__lowerCAmelCase , 6 ) ^ self.ror(__lowerCAmelCase , 11 ) ^ self.ror(__lowerCAmelCase , 25 )
                lowercase = (e & f) ^ ((~e & 0Xff_fff_fff) & g)
                lowercase = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0X100_000_000
                lowercase = self.ror(__lowerCAmelCase , 2 ) ^ self.ror(__lowerCAmelCase , 13 ) ^ self.ror(__lowerCAmelCase , 22 )
                lowercase = (a & b) ^ (a & c) ^ (b & c)
                lowercase = (sa + maj) % 0X100_000_000
                # rotate the eight working registers for the next round
                lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0X100_000_000),
                    c,
                    b,
                    a,
                    ((tempa + tempa) % 0X100_000_000),
                )
                lowercase = [a, b, c, d, e, f, g, h]
            # Modify final values
            lowercase = [
                ((element + mutated_hash_values[index]) % 0X100_000_000)
                for index, element in enumerate(self.hashes )
            ]
        lowercase = """""".join([hex(__lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
        """32-bit rotate-right of `value` by `rotations` bits."""
        return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations)
class _A ( unittest.TestCase ):
    # Cross-check the hand-rolled implementation against hashlib.
    # NOTE(review): references `SHAaaa` / `hashlib.shaaaa`, which look like an
    # automated rewrite of `SHA256` / `hashlib.sha256` — verify.
    def A__ ( self ):
        """Digest of a known string must match hashlib's SHA-256."""
        import hashlib
        lowercase = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(__lowerCAmelCase ).hash , hashlib.shaaaa(__lowerCAmelCase ).hexdigest() )
def UpperCAmelCase__ ( ) -> None:
    """CLI: hash a string (-s) or the contents of a file (-f) and print the
    hex digest.

    NOTE(review): parser/args bound to `lowercase` but read as
    `parser`/`args`, and `SHAaaa` does not match the mangled class name —
    verify against the original script.
    """
    import doctest
    doctest.testmod()
    lowercase = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    lowercase = parser.parse_args()
    lowercase = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            lowercase = f.read()
    else:
        lowercase = bytes(lowerCAmelCase__ , """utf-8""" )
    print(SHAaaa(lowerCAmelCase__ ).hash )
# Run the CLI when executed directly.
if __name__ == "__main__":
    main()
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( credit_card_number :str ) -> bool:
    """Check the issuer prefix: 34/35/37 (Amex/JCB/Diners), 4 (Visa),
    5 (MasterCard) or 6 (Discover).

    Original parameter was mis-named; the body read the unbound name
    `credit_card_number` — restored here.
    """
    return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( credit_card_number :str ) -> bool:
    """Luhn checksum: double every second digit from the right, fold two-digit
    results back to one digit, and require the total to be divisible by 10.

    Original body read the unbound names `cc_number`/`total` and never
    rebuilt the digit string; restored here.
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 1_0
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 1_0 == 0
def UpperCAmelCase__ ( credit_card_number :str ) -> bool:
    """Validate a credit-card number: digits only, length 13-16, a known
    issuer prefix and a passing Luhn checksum. Prints the verdict and, on
    failure, the first failing reason.

    Original body read the unbound name `error_message`; restored here.
    """
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.' )
        return False
    if not 1_3 <= len(credit_card_number ) <= 1_6:
        print(f'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(f'{error_message} it fails the Luhn check.' )
        return False
    print(f'{credit_card_number} is a valid credit card number.' )
    return True
# Demo: one valid and one invalid card number.
# NOTE(review): calls `validate_credit_card_number`, which does not match the
# mangled definition name above — verify against the original module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    validate_credit_card_number("""4111111111111111""")
    validate_credit_card_number("""32323""")
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( nums :list , max_sum :int ) -> list:
    """Return every subset of ``nums`` (index-ordered lists) whose elements
    sum to exactly ``max_sum``, via backtracking.

    Original body bound every local to `lowercase` and passed the same
    argument six times to the tree builder; restored here.
    """
    result = []
    path = []
    num_index = 0
    # remaining-sum upper bound used for pruning in the recursion
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def UpperCAmelCase__ ( nums :list , max_sum :int , num_index :int , path :list , result :list , remaining_nums_sum :int , ) -> None:
    """Depth-first backtracking: extend ``path`` with elements of ``nums``
    from ``num_index`` onward, appending a copy-free path to ``result``
    whenever the running sum equals ``max_sum``.

    Prunes a branch when the sum overshoots or the remaining elements can
    no longer reach the target. Original signature duplicated all six
    parameter names (a SyntaxError) and the recursive call targeted a
    nonexistent name; restored here (the recursion targets this function).
    """
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        UpperCAmelCase__(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
# Demo: print every subset of `nums` summing to 9.
# NOTE(review): calls `generate_sum_of_subsets_soln` and reads
# `nums`/`max_sum`/`result`, none of which match the mangled names above.
__lowerCAmelCase : Dict =[3, 3_4, 4, 1_2, 5, 2]
__lowerCAmelCase : str =9
__lowerCAmelCase : Optional[Any] =generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 32
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
    """Fast unit tests for ``StableDiffusionPipelineSafe`` built from tiny dummy sub-models.

    NOTE(review): this block looks machine-renamed -- every method is named
    ``A__`` (so later definitions shadow earlier ones in the class namespace),
    every local is rebound to the single name ``lowercase``, and the bodies
    read names (``batch_size``, ``image``, ``model``, ``self.dummy_cond_unet``,
    ``__lowerCAmelCase``, ``Out``) that are never defined here.  Code is kept
    byte-identical; only documentation is added.  Confirm intended identifiers
    against the original diffusers test file.
    """

    def A__ ( self ):
        """Tear down: release Python- and CUDA-side memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def A__ ( self ):
        """Return a deterministic dummy image batch (presumably 1x3x32x32 -- TODO confirm)."""
        lowercase = 1
        lowercase = 3
        lowercase = (32, 32)
        # NOTE(review): reads ``batch_size``/``num_channels``/``sizes``/``image``,
        # none of which survive the renaming above.
        lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        return image

    @property
    def A__ ( self ):
        """Build a tiny, seed-fixed conditional UNet for fast CPU tests."""
        torch.manual_seed(0 )
        lowercase = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        return model

    @property
    def A__ ( self ):
        """Build a tiny, seed-fixed KL autoencoder (VAE)."""
        torch.manual_seed(0 )
        lowercase = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model

    @property
    def A__ ( self ):
        """Build a tiny, seed-fixed CLIP text encoder."""
        torch.manual_seed(0 )
        lowercase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(__lowerCAmelCase )

    @property
    def A__ ( self ):
        """Return a stand-in feature-extractor callable that skips real preprocessing."""
        def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
            class _A :
                def __init__( self ):
                    """Dummy output object exposing an (intended) empty ``pixel_values`` tensor."""
                    lowercase = torch.ones([0] )
                def A__ ( self , __lowerCAmelCase ):
                    """Mimic ``.to(device)``; returns self for call chaining."""
                    self.pixel_values.to(__lowerCAmelCase )
                    return self
            # NOTE(review): ``Out`` is undefined -- the inner class was renamed to ``_A``.
            return Out()
        return extract

    def A__ ( self ):
        """End-to-end smoke test with a DDIM scheduler on CPU; pins a 3x3 pixel slice."""
        lowercase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowercase = self.dummy_cond_unet
        lowercase = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
        lowercase = self.dummy_vae
        lowercase = self.dummy_text_encoder
        lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        lowercase = StableDiffusionPipeline(
            unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        lowercase = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = """A painting of a squirrel eating a burger"""
        lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        lowercase = output.images
        # Same seed again: dict output and tuple output must match each other.
        lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
        lowercase = image[0, -3:, -3:, -1]
        lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
        # Reference slice checked to 1e-2 for both output forms.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def A__ ( self ):
        """Same smoke test with a PNDM scheduler (prk steps skipped)."""
        lowercase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowercase = self.dummy_cond_unet
        lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
        lowercase = self.dummy_vae
        lowercase = self.dummy_text_encoder
        lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        lowercase = StableDiffusionPipeline(
            unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        lowercase = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = """A painting of a squirrel eating a burger"""
        lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        lowercase = output.images
        lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
        lowercase = image[0, -3:, -3:, -1]
        lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def A__ ( self ):
        """Pipeline loads, runs, saves and reloads correctly with ``safety_checker=None``."""
        lowercase = StableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
        assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
        assert isinstance(pipe.scheduler , __lowerCAmelCase )
        assert pipe.safety_checker is None
        lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def A__ ( self ):
        """fp16 smoke test: the whole pipeline runs end to end in half precision on GPU."""
        lowercase = self.dummy_cond_unet
        lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
        lowercase = self.dummy_vae
        lowercase = self.dummy_text_encoder
        lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # put models in fp16
        lowercase = unet.half()
        lowercase = vae.half()
        lowercase = bert.half()
        # make sure here that pndm scheduler skips prk
        lowercase = StableDiffusionPipeline(
            unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
        lowercase = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = """A painting of a squirrel eating a burger"""
        lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Nightly GPU integration tests for safe latent diffusion (SLD) guidance.

    Each test renders the same seeded prompt twice -- once with
    ``sld_guidance_scale=0`` (safety guidance off) and once with a strong SLD
    configuration -- and pins a 3x3 corner slice of the output against
    reference values.

    NOTE(review): identifiers look machine-renamed (all methods ``A__``,
    locals clobbered into ``lowercase``, undefined ``__lowerCAmelCase``);
    code kept byte-identical, documentation only.
    """

    def A__ ( self ):
        """Tear down: release Python- and CUDA-side memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self ):
        """SLD off vs. strong SLD on a harmless-but-edgy prompt (LMS scheduler)."""
        lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
        lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        lowercase = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = (
            """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
            """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
            """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
            """ children from bahnhof zoo, detailed """
        )
        lowercase = 40_0366_0346
        lowercase = 7
        # without safety guidance (sld_guidance_scale = 0)
        lowercase = torch.manual_seed(__lowerCAmelCase )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        lowercase = output.images
        lowercase = image[0, -3:, -3:, -1]
        lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # without safety guidance (strong configuration)
        lowercase = torch.manual_seed(__lowerCAmelCase )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        lowercase = output.images
        lowercase = image[0, -3:, -3:, -1]
        lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def A__ ( self ):
        """SLD off vs. strong SLD on a nudity-adjacent prompt (LMS scheduler)."""
        lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
        lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        lowercase = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
        lowercase = 27_3497_1755
        lowercase = 7
        lowercase = torch.manual_seed(__lowerCAmelCase )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        lowercase = output.images
        lowercase = image[0, -3:, -3:, -1]
        lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        lowercase = torch.manual_seed(__lowerCAmelCase )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        lowercase = output.images
        lowercase = image[0, -3:, -3:, -1]
        lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def A__ ( self ):
        """Default checker blacks out an unsafe render (all-zero slice); strong SLD recovers a safe image."""
        lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
        lowercase = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowercase = (
            """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
            """ leyendecker"""
        )
        lowercase = 10_4435_5234
        lowercase = 12
        lowercase = torch.manual_seed(__lowerCAmelCase )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        lowercase = output.images
        lowercase = image[0, -3:, -3:, -1]
        # With SLD disabled the safety checker should have zeroed the image.
        lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
        lowercase = torch.manual_seed(__lowerCAmelCase )
        lowercase = sd_pipe(
            [prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        lowercase = output.images
        lowercase = image[0, -3:, -3:, -1]
        lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 32
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A :
    """Arguments selecting which pretrained model/config/tokenizer to fine-tune.

    NOTE(review): identifiers look machine-renamed -- every field is called
    ``snake_case__`` (so later fields shadow earlier ones) and optional
    defaults reference an undefined name ``lowerCAmelCase``.  Judging by the
    help strings, the intended fields are: model path (required), config name,
    tokenizer name, cache dir, use-fast-tokenizer flag, model revision, and
    use-auth-token flag -- confirm against the original run_swag script.
    """

    # Required: path or hub id of the pretrained model.
    snake_case__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    snake_case__ : Optional[str] = field(
        default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    snake_case__ : Optional[str] = field(
        default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    snake_case__ : Optional[str] = field(
        default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    snake_case__ : bool = field(
        default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    snake_case__ : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    snake_case__ : bool = field(
        default=lowerCAmelCase , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class _A :
    """Arguments describing the training/evaluation data and preprocessing limits.

    NOTE(review): fields are machine-renamed to ``snake_case__`` (later fields
    shadow earlier ones) with defaults referencing an undefined
    ``lowerCAmelCase``; the validation method reads ``self.train_file`` /
    ``self.validation_file`` which no visible field defines, and assigns the
    file extension to ``lowercase`` while asserting on ``extension``.
    Code kept byte-identical; documentation only.
    """

    snake_case__ : Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
    snake_case__ : Optional[str] = field(
        default=lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    snake_case__ : bool = field(
        default=lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    snake_case__ : Optional[int] = field(
        default=lowerCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    snake_case__ : Optional[int] = field(
        default=lowerCAmelCase , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    snake_case__ : bool = field(
        default=lowerCAmelCase , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    snake_case__ : Optional[int] = field(
        default=lowerCAmelCase , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    snake_case__ : Optional[int] = field(
        default=lowerCAmelCase , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def A__ ( self ):
        """Validate that the train/validation files, when provided, are csv or json."""
        if self.train_file is not None:
            lowercase = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            lowercase = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _A :
    """Data collator that dynamically pads multiple-choice features.

    Flattens each example's ``num_choices`` candidate encodings into
    independent rows, pads them with the tokenizer, then restores the
    ``(batch, num_choices, seq_len)`` layout and re-attaches the labels.

    NOTE(review): fields are machine-renamed to ``snake_case__`` (intended:
    tokenizer, padding strategy, max_length, pad_to_multiple_of -- the body
    reads those attribute names), and locals are clobbered into ``lowercase``
    while later lines read ``features``/``batch``/etc.  Code kept
    byte-identical; documentation only.
    """

    snake_case__ : PreTrainedTokenizerBase
    snake_case__ : Union[bool, str, PaddingStrategy] = True
    snake_case__ : Optional[int] = None
    snake_case__ : Optional[int] = None

    def __call__( self , __lowerCAmelCase ):
        """Collate a list of multiple-choice feature dicts into one padded tensor batch."""
        # The label column may be stored under "label" or "labels".
        lowercase = """label""" if """label""" in features[0].keys() else """labels"""
        lowercase = [feature.pop(__lowerCAmelCase ) for feature in features]
        lowercase = len(__lowerCAmelCase )
        lowercase = len(features[0]["""input_ids"""] )
        # Split each example into one dict per choice so the tokenizer can pad them.
        lowercase = [
            [{k: v[i] for k, v in feature.items()} for i in range(__lowerCAmelCase )] for feature in features
        ]
        lowercase = list(chain(*__lowerCAmelCase ) )
        lowercase = self.tokenizer.pad(
            __lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        # Un-flatten
        lowercase = {k: v.view(__lowerCAmelCase , __lowerCAmelCase , -1 ) for k, v in batch.items()}
        # Add back labels
        lowercase = torch.tensor(__lowerCAmelCase , dtype=torch.intaa )
        return batch
def UpperCAmelCase__ ( ) -> List[Any]:
    '''Entry point: parse arguments, load SWAG-style data, fine-tune and evaluate
    a multiple-choice model, then optionally push the result (with model card)
    to the Hugging Face Hub.

    NOTE(review): this function appears machine-renamed -- every local is
    rebound to the single name ``lowercase`` while subsequent lines read the
    intended names (``parser``, ``training_args``, ``raw_datasets``, ...), and
    ``lowerCAmelCase__`` placeholders are undefined.  Code kept byte-identical;
    documentation only.  Confirm identifiers against the original run_swag.py.
    '''
    # Argument parsing: either CLI flags or a single path to a .json config.
    lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_swag""" , lowerCAmelCase__ , lowerCAmelCase__ )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    lowercase = training_args.get_process_log_level()
    logger.setLevel(lowerCAmelCase__ )
    datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
    transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(f'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    lowercase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        lowercase = {}
        if data_args.train_file is not None:
            lowercase = data_args.train_file
        if data_args.validation_file is not None:
            lowercase = data_args.validation_file
        lowercase = data_args.train_file.split(""".""" )[-1]
        lowercase = load_dataset(
            lowerCAmelCase__ , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        lowercase = load_dataset(
            """swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowercase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowercase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    lowercase = [f'ending{i}' for i in range(4 )]
    lowercase = """sent1"""
    lowercase = """sent2"""
    if data_args.max_seq_length is None:
        lowercase = tokenizer.model_max_length
        if max_seq_length > 1_0_2_4:
            logger.warning(
                """The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
                """ of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
                """ override this default with `--block_size xxx`.""" )
            lowercase = 1_0_2_4
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        lowercase = min(data_args.max_seq_length , tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(lowerCAmelCase__ :Optional[Any] ):
        """Expand each example into 4 (context, ending) pairs and tokenize them."""
        lowercase = [[context] * 4 for context in examples[context_name]]
        lowercase = examples[question_header_name]
        lowercase = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(lowerCAmelCase__ )
        ]
        # Flatten out
        lowercase = list(chain(*lowerCAmelCase__ ) )
        lowercase = list(chain(*lowerCAmelCase__ ) )
        # Tokenize
        lowercase = tokenizer(
            lowerCAmelCase__ , lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(lowerCAmelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        lowercase = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            lowercase = min(len(lowerCAmelCase__ ) , data_args.max_train_samples )
            lowercase = train_dataset.select(range(lowerCAmelCase__ ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            lowercase = train_dataset.map(
                lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        lowercase = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            lowercase = min(len(lowerCAmelCase__ ) , data_args.max_eval_samples )
            lowercase = eval_dataset.select(range(lowerCAmelCase__ ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            lowercase = eval_dataset.map(
                lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    lowercase = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(lowerCAmelCase__ :Optional[Any] ):
        """Accuracy of the argmax prediction over the four choices."""
        lowercase , lowercase = eval_predictions
        lowercase = np.argmax(lowerCAmelCase__ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    lowercase = Trainer(
        model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , )
    # Training
    if training_args.do_train:
        lowercase = None
        if training_args.resume_from_checkpoint is not None:
            lowercase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase = last_checkpoint
        lowercase = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
        trainer.save_model() # Saves the tokenizer too for easy upload
        lowercase = train_result.metrics
        lowercase = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
        )
        lowercase = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
        trainer.log_metrics("""train""" , lowerCAmelCase__ )
        trainer.save_metrics("""train""" , lowerCAmelCase__ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        lowercase = trainer.evaluate()
        lowercase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
        lowercase = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
        trainer.log_metrics("""eval""" , lowerCAmelCase__ )
        trainer.save_metrics("""eval""" , lowerCAmelCase__ )
    # Model-card metadata for hub upload / local card creation.
    lowercase = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """multiple-choice""",
        """dataset_tags""": """swag""",
        """dataset_args""": """regular""",
        """dataset""": """SWAG""",
        """language""": """en""",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCAmelCase__ )
    else:
        trainer.create_model_card(**lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Any:
    '''Spawn helper (typical TPU/``xla_spawn`` entry) that ignores its index argument.

    NOTE(review): calls ``main()``, which is never defined in this file -- the
    training entry point above is itself named ``UpperCAmelCase__`` and is
    shadowed by this definition, so calling this raises NameError.  Identifiers
    look machine-renamed; confirm against the original run_swag.py
    (``def _mp_fn(index): main()``).
    '''
    main()
if __name__ == "__main__":
    # NOTE(review): ``main`` is never defined in this file (the entry point
    # above was renamed to ``UpperCAmelCase__``), so this raises NameError at
    # script startup -- looks like a mangled rename; confirm intended target.
    main()
| 32
|
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """Row-reduce an augmented matrix one pivot at a time (recursively).

    Each row is divided by its leading coefficient, the first row is then
    subtracted from every other row to cancel their leading terms, and the
    remaining sub-matrix (without its first column) is simplified recursively
    until the rows shrink to length 3.  Rows are mutated in place.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> 'unit' leading terms.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from the others to cancel their leading term.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Recurse on the sub-matrix without its first column, then re-attach it.
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations in n unknowns.

    Each row is ``[a1, ..., an, c]`` representing ``a1*x1 + ... + an*xn = c``.
    Returns the solution ``[x1, ..., xn]``, each value rounded to 5 decimal
    places.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]

    Raises:
        IndexError: if no equations are given, or any row is not length n+1.
        ValueError: if a coefficient is not numeric, or no equation is free of
            zeros (a zero-free pivot row is required).
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        # Single equation a*x = c.
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Elimination needs a zero-free first row; move one to the front if needed.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    # Back-substitute from the last (fully reduced) row upwards.
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 32
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.