| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 86 – 54.5k) | int64 (0 – 371) | string (length 87 – 49.2k) | int64 (0 – 349) | int64 (0 – 1) |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Get bounds for printing fft results."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
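
A minimal usage sketch, assuming only the code above: any object with a `process(sample)` method satisfies the `FilterType` protocol, so a pass-through filter makes a quick smoke test (its gain curve should sit at 0 dB across the band).

class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


show_frequency_response(IdentityFilter(), samplerate=48000)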
--- code_codestyle: 205 ---
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function so that it lower-cases and removes underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
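
A quick illustrative call, assuming only the functions above (no transformers checkout needed): the bracketed names come back as constants, then classes, then functions, each group alphabetized.

statement = '_import_structure["models.albert"] = ["load_albert", "AlbertTokenizer", "ALBERT_CONSTANT"]'
print(sort_objects_in_import(statement))
# _import_structure["models.albert"] = ["ALBERT_CONSTANT", "AlbertTokenizer", "load_albert"]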
--- style_context_codestyle: 205, label: 1 ---
"""Tokenization classes for ALBERT model."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
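
A usage sketch, assuming a local `spiece.model` file (in practice the class is normally instantiated via `AlbertTokenizer.from_pretrained("albert-base-v2")`):

tokenizer = AlbertTokenizer("spiece.model")
tokens = tokenizer.tokenize("The quick brown fox.")
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.convert_tokens_to_string(tokens))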
--- code_codestyle: 350 ---
"""GPTNeoXJapanese model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
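
A usage sketch: the defaults above mirror abeja/gpt-neox-japanese-2.7b, and any field can be overridden at construction.

config = GPTNeoXJapaneseConfig(num_hidden_layers=4)  # a small variant for quick tests
print(config.vocab_size, config.hidden_size, config.num_hidden_layers)
# 32000 2560 4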
--- style_context_codestyle: 31, label: 0 ---
class MaxFenwickTree:
    """
    Fenwick tree (binary indexed tree) variant supporting point updates and
    range-maximum queries in O(log n).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and repair every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers a single element.
                self.tree[index] = value
            else:
                # The node covers [current_left_border, index]; recompute its maximum.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
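
A small usage sketch of the structure above (indices are 0-based; the query's right bound is exclusive):

tree = MaxFenwickTree(8)
tree.update(2, 5)
tree.update(5, 9)
print(tree.query(0, 6))  # 9: indices 0..5 include the 9 at index 5
print(tree.query(0, 5))  # 5: index 5 is excluded, so the max is the 5 at index 2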
--- code_codestyle: 58 ---
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
# Function to get the label from the filename
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
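
A programmatic invocation sketch (the data folder is hypothetical; images are expected to be named `{label}_{id}.jpg`). The CLI equivalent is `accelerate launch cv_example.py --data_dir path/to/pets`.

args = argparse.Namespace(
    data_dir="path/to/pets", cpu=True, mixed_precision=None, checkpointing_steps=None,
    output_dir=".", resume_from_checkpoint=None, with_tracking=False, project_dir="logs",
)
config = {"lr": 3e-2, "num_epochs": 1, "seed": 42, "batch_size": 8, "image_size": 224}
training_function(config, args)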
--- style_context_codestyle: 58, label: 1 ---
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer.

    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(0)
    0
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer.

    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(0)
    0
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code for comparing the two functions, with different int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
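
A worked illustration of Brian Kernighan's step, using only the functions above: each `number &= number - 1` clears exactly the lowest set bit, so the loop runs once per set bit.

number = 0b10110          # 22, three set bits
number &= number - 1      # 0b10100
number &= number - 1      # 0b10000
number &= number - 1      # 0b00000 -> the loop ran 3 times
assert get_set_bits_count_using_brian_kernighans_algorithm(22) == 3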
--- code_codestyle: 371 ---
"""Image processor class for CLIP."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, i.e. `image = image * scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image, i.e. `image = (image - mean) / std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
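
A usage sketch (illustrative only: the random array stands in for a real photo, and within transformers this processor is usually obtained via `from_pretrained`). The pipeline resizes the shortest edge to 224, center crops, rescales, and normalizes.

processor = CLIPImageProcessor()
fake_image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
batch = processor.preprocess(fake_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)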
--- style_context_codestyle: 280, label: 0 ---
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic is the Manhattan distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successors inside the grid that are free spaces."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until reaching the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
--- code_codestyle: 50 ---
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a model trained on NLI (natural language inference) tasks.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Parse arguments and tokenize only_first so that the hypothesis (label) is not truncated."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
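
A usage sketch via the standard `pipeline` factory (the factory and the `facebook/bart-large-mnli` checkpoint are general transformers usage, not part of this file):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])
print(result["labels"][0], round(result["scores"][0], 3))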
--- style_context_codestyle: 206, label: 0 ---
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
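
A sketch of what the lazy module buys the caller (standard transformers layout; nothing here is specific to this file): importing the config does not pull in torch-dependent modeling code, which is only loaded when one of its symbols is first accessed.

from transformers import FocalNetConfig  # does not import modeling_focalnet yet

config = FocalNetConfig()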
--- code_codestyle: 309 ---
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
        lowerCAmelCase = ''.join(_snake_case ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
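
# Illustrative standalone sketch (not part of the tokenizer above): the pair-input
# layout built by the special-token methods is [CLS] A [SEP] [SEP] B [SEP], with
# token type ids 0 for [CLS]+A and 1 from the first [SEP] onward, matching the
# (len + 1) / (len + 3) split above. All ids below are invented for demonstration.
def _demo_pair_inputs(ids_a, ids_b, cls_id=0, sep_id=2):
    input_ids = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
    assert len(input_ids) == len(token_type_ids)
    return input_ids, token_type_ids

# _demo_pair_inputs([11, 12], [21]) -> ([0, 11, 12, 2, 2, 21, 2], [0, 0, 0, 1, 1, 1, 1])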
| 309
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A : Union[str, Any] = logging.get_logger(__name__)
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : List[str] , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Dict , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Dict , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : str , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[Any] , )->int:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Tuple , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , )->np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = to_numpy_array(__UpperCamelCase )
if do_resize:
_UpperCAmelCase = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
_UpperCAmelCase = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
_UpperCAmelCase = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase )
if do_normalize:
_UpperCAmelCase = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
_UpperCAmelCase = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowercase__ ( self : Optional[int] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : Any , )->PIL.Image.Image:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name='''crop_size''' )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
_UpperCAmelCase = make_batched(__UpperCamelCase )
_UpperCAmelCase = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
_UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
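
# Standalone sketch of the per-frame pipeline the processor above applies
# (assumptions: square center crop, channels-last uint8 input larger than the crop,
# scalar mean/std for brevity; the real processor also resizes the shortest edge).
def _demo_preprocess_frame(frame, size=224, scale=1 / 255, mean=0.5, std=0.5):
    top = (frame.shape[0] - size) // 2
    left = (frame.shape[1] - size) // 2
    crop = frame[top : top + size, left : left + size]   # center crop
    crop = crop.astype(np.float32) * scale               # rescale to [0, 1]
    crop = (crop - mean) / std                           # normalize
    return np.transpose(crop, (2, 0, 1))                 # HWC -> CHW

# _demo_preprocess_frame(np.zeros((360, 480, 3), np.uint8)).shape == (3, 224, 224)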
| 260
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__A : Union[str, Any] = "\\n\n"
__A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a ( datasets.Metric):
"""simple docstring"""
def lowercase__ ( self : List[Any] )->Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, gpu or cuda."
if device == "gpu":
_UpperCAmelCase = '''cuda'''
else:
_UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = model.to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__UpperCamelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCAmelCase = model.config.max_length - 1
else:
_UpperCAmelCase = model.config.max_length
_UpperCAmelCase = tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase )
_UpperCAmelCase = encodings['''input_ids''']
_UpperCAmelCase = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCAmelCase = []
_UpperCAmelCase = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ):
_UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) )
_UpperCAmelCase = encoded_texts[start_index:end_index]
_UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
_UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase )
_UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_UpperCAmelCase = torch.cat(
                [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(__UpperCamelCase ), attn_mask] , dim=1 )
_UpperCAmelCase = encoded_batch
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits
_UpperCAmelCase = out_logits[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = attn_mask[..., 1:].contiguous()
            _UpperCAmelCase = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
| 260
| 1
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = FunnelTokenizer
__UpperCamelCase = FunnelTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
super().setUp()
A_ : Tuple = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self :List[Any] , **snake_case :Any ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def SCREAMING_SNAKE_CASE ( self :str , **snake_case :str ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict ):
'''simple docstring'''
A_ : List[str] = '''UNwant\u00E9d,running'''
A_ : Dict = '''unwanted, running'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : str = self.tokenizer_class(self.vocab_file )
A_ : Dict = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : int = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
A_ : Union[str, Any] = tokenizer("UNwant\u00E9d,running" )
A_ : Optional[int] = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
A_ : Any = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
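
# Illustrative greedy longest-match WordPiece sketch (a simplification of the
# tokenizer under test; real WordPiece also handles unknown characters and
# punctuation splitting before subword matching).
def _demo_wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        if end == start:                # no piece matched: give up on the word
            return ["<unk>"]
        start = end
    return pieces

# _demo_wordpiece("unwanted", {"un", "##want", "##ed"}) -> ['un', '##want', '##ed']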
| 357
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] ) -> Dict:
A_ : Optional[Any] = 0
if start < end:
A_ : Tuple = randint(_lowerCAmelCase , _lowerCAmelCase )
A_ : str = a[end]
A_ : Optional[Any] = a[pivot]
A_ : List[str] = temp
A_ , A_ : int = _in_place_partition(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
count += _in_place_quick_sort(_lowerCAmelCase , _lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(_lowerCAmelCase , p + 1 , _lowerCAmelCase )
return count
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> str:
A_ : Union[str, Any] = 0
A_ : List[str] = randint(_lowerCAmelCase , _lowerCAmelCase )
A_ : str = a[end]
A_ : str = a[pivot]
A_ : Any = temp
A_ : int = start - 1
for index in range(_lowerCAmelCase , _lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
A_ : Union[str, Any] = new_pivot_index + 1
A_ : Union[str, Any] = a[new_pivot_index]
A_ : Union[str, Any] = a[index]
A_ : Union[str, Any] = temp
A_ : Tuple = a[new_pivot_index + 1]
A_ : Optional[int] = a[end]
A_ : Dict = temp
return new_pivot_index + 1, count
_lowerCAmelCase : List[str] = TemporaryFile()
_lowerCAmelCase : int = 100 # 100 elements are to be sorted
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = 0, 1 # mean and standard deviation
_lowerCAmelCase : int = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
_lowerCAmelCase : Optional[Any] = np.load(outfile)
_lowerCAmelCase : Optional[int] = len(M) - 1
_lowerCAmelCase : Union[str, Any] = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
'''is :'''
)
print(z)
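
# Self-contained restatement of the idea above (randomized pivot, Lomuto partition,
# comparison counting) for a quick sanity check without the temp-file round-trip:
def _demo_quick_sort_count(a, lo, hi):
    if lo >= hi:
        return 0
    p = randint(lo, hi)
    a[hi], a[p] = a[p], a[hi]           # move a random pivot to the end
    i, count = lo - 1, 0
    for j in range(lo, hi):
        count += 1                      # one comparison per scanned element
        if a[j] < a[hi]:
            i += 1
            a[i], a[j] = a[j], a[i]
    a[i + 1], a[hi] = a[hi], a[i + 1]
    return count + _demo_quick_sort_count(a, lo, i) + _demo_quick_sort_count(a, i + 2, hi)

# data = [5, 2, 9, 1]; _demo_quick_sort_count(data, 0, 3); data == [1, 2, 5, 9]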
| 70
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : int= {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str]= [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_a : str= _LazyModule(__name__, globals()["__file__"], _import_structure)
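
# Hedged sketch of what _LazyModule provides (a simplified stand-in, not the actual
# transformers implementation): attribute access triggers the underlying import, so
# importing the package itself stays cheap.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that really defines it
        self._origin = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        if attr not in self._origin:
            raise AttributeError(attr)
        return getattr(importlib.import_module(self._origin[attr]), attr)

# _DemoLazyModule("demo", {"math": ["sqrt"]}).sqrt(9.0) imports math only on access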
| 172
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_a : List[Any]= re.compile(R"\s+")
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> int:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(UpperCAmelCase_ , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def __UpperCAmelCase ( UpperCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
    __snake_case : Any = [len(line ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(UpperCAmelCase_ ), "line_max": max(UpperCAmelCase_ )}
def __UpperCAmelCase ( UpperCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
__snake_case : Tuple = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def __UpperCAmelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]=5 ) -> str:
'''simple docstring'''
__snake_case : Tuple = ['auto-generated', 'autogenerated', 'automatically generated']
__snake_case : Tuple = example['content'].splitlines()
for _, line in zip(range(UpperCAmelCase_ ) , UpperCAmelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : Optional[int]=0.05 ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = ['unit tests', 'test file', 'configuration file']
__snake_case : Tuple = example['content'].splitlines()
__snake_case : Tuple = 0
__snake_case : Any = 0
# first test
for _, line in zip(range(UpperCAmelCase_ ) , UpperCAmelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__snake_case : int = example['content'].count('\n' )
__snake_case : str = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Any = ['def ', 'class ', 'for ', 'while ']
__snake_case : Optional[int] = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]=4 ) -> Dict:
'''simple docstring'''
__snake_case : Optional[Any] = example['content'].splitlines()
__snake_case : Tuple = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] ) -> Any:
'''simple docstring'''
    __snake_case : List[Any] = tokenizer(example['content'] , truncation=False )['input_ids']
__snake_case : Union[str, Any] = len(example['content'] ) / len(UpperCAmelCase_ )
return {"ratio": ratio}
def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> str:
'''simple docstring'''
__snake_case : List[Any] = {}
results.update(get_hash(UpperCAmelCase_ ) )
results.update(line_stats(UpperCAmelCase_ ) )
results.update(alpha_stats(UpperCAmelCase_ ) )
results.update(char_token_ratio(UpperCAmelCase_ ) )
results.update(is_autogenerated(UpperCAmelCase_ ) )
results.update(is_config_or_test(UpperCAmelCase_ ) )
results.update(has_no_keywords(UpperCAmelCase_ ) )
results.update(has_few_assignments(UpperCAmelCase_ ) )
return results
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : str ) -> Any:
'''simple docstring'''
if not check_uniques(UpperCAmelCase_ , UpperCAmelCase_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( UpperCAmelCase_ : Any ) -> Any:
'''simple docstring'''
with open(UpperCAmelCase_ , 'rb' ) as f_in:
with gzip.open(str(UpperCAmelCase_ ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(UpperCAmelCase_ , UpperCAmelCase_ )
os.unlink(UpperCAmelCase_ )
# Settings
_a : int= HfArgumentParser(PreprocessingArguments)
_a : Union[str, Any]= parser.parse_args()
if args.num_workers is None:
_a : str= multiprocessing.cpu_count()
_a : Optional[int]= AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_a : Tuple= time.time()
_a : Dict= load_dataset(args.dataset_name, split="train")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
_a : str= time.time()
_a : int= ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
_a : Tuple= set(ds.unique("hash"))
_a : Optional[int]= len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
_a : Union[str, Any]= time.time()
_a : List[Any]= ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_a : Tuple= time.time()
_a, _a : Tuple= deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
_a : Union[str, Any]= Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
_a : List[Any]= output_dir / "data"
data_dir.mkdir(exist_ok=True)
_a : Tuple= time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_a : List[str]= str(data_dir / f'''file-{file_number+1:012}.json''')
_a : List[str]= min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
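
# Minimal sketch of the exact-deduplication step above: hash whitespace-stripped
# content and keep only the first occurrence of each hash.
def _demo_exact_dedup(docs):
    seen, unique_docs = set(), []
    for doc in docs:
        h = hashlib.md5(re.sub(r"\s+", "", doc).encode("utf-8")).hexdigest()
        if h not in seen:
            seen.add(h)
            unique_docs.append(doc)
    return unique_docs

# _demo_exact_dedup(["def f():  return 1", "def f():\n    return 1"]) keeps only one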
| 172
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ = ""
else:
UpperCAmelCase_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ = in_proj_bias[-config.hidden_size :]
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=lowerCAmelCase__ , )
UpperCAmelCase_ = ViTHybridConfig(backbone_config=lowerCAmelCase__ , image_size=384 , num_labels=1000 )
UpperCAmelCase_ = False
# load original model from timm
UpperCAmelCase_ = timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase__ )
UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCAmelCase_ = ViTHybridModel(lowerCAmelCase__ ).eval()
else:
UpperCAmelCase_ = ViTHybridForImageClassification(lowerCAmelCase__ ).eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = ViTHybridImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
UpperCAmelCase_ = timm_model.forward_features(lowerCAmelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase__ , outputs.pooler_output , atol=1e-3 )
else:
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
lowerCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
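
# Toy version of the rename-and-pop pattern used throughout the conversion above:
# pop each tensor from its source key and reinsert it under the destination key.
def _demo_rename(state_dict, rename_keys):
    for src, dest in rename_keys:
        state_dict[dest] = state_dict.pop(src)
    return state_dict

# _demo_rename({"patch_embed.proj.weight": torch.zeros(4, 3)},
#              [("patch_embed.proj.weight",
#                "vit.embeddings.patch_embeddings.projection.weight")])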
| 241
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''rwkv'''
UpperCamelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , _UpperCAmelCase : int=50277 , _UpperCAmelCase : Optional[Any]=1024 , _UpperCAmelCase : str=4096 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=1e-5 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=6 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Optional[Any] , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = context_length
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = rescale_every
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
| 241
| 1
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> bool:
snake_case : List[Any] = len(lowercase )
# We need to create solution object to save path.
snake_case : str = [[0 for _ in range(lowercase )] for _ in range(lowercase )]
snake_case : Dict = run_maze(lowercase ,0 ,0 ,lowercase )
if solved:
print("""\n""".join(str(lowercase ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> bool:
snake_case : Optional[Any] = len(lowercase )
# Final check point.
if i == j == (size - 1):
snake_case : List[str] = 1
return True
snake_case : Dict = (not i < 0) and (not j < 0) # Check lower bounds
snake_case : List[str] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
snake_case : Optional[Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
snake_case : Optional[int] = 1
# check for directions
if (
run_maze(lowercase ,i + 1 ,lowercase ,lowercase )
or run_maze(lowercase ,lowercase ,j + 1 ,lowercase )
or run_maze(lowercase ,i - 1 ,lowercase ,lowercase )
or run_maze(lowercase ,lowercase ,j - 1 ,lowercase )
):
return True
snake_case : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
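
# Self-contained restatement of the backtracking idea above (0 = open cell,
# 1 = wall), marking cells on the way in and unmarking them on dead ends:
def _demo_solve(maze):
    n = len(maze)
    path = [[0] * n for _ in range(n)]

    def walk(i, j):
        if not (0 <= i < n and 0 <= j < n) or maze[i][j] or path[i][j]:
            return False
        path[i][j] = 1
        if (i, j) == (n - 1, n - 1) or any(
            walk(x, y) for x, y in ((i + 1, j), (i, j + 1), (i - 1, j), (i, j - 1))
        ):
            return True
        path[i][j] = 0                              # backtrack
        return False

    return path if walk(0, 0) else None

# _demo_solve([[0, 1, 0], [0, 1, 0], [0, 0, 0]]) -> [[1, 0, 0], [1, 0, 0], [1, 1, 1]]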
| 124
|
from __future__ import annotations
import math
lowerCamelCase : Optional[int] = '2020.9.26'
lowerCamelCase : int = 'xcodz-dot, cclaus, dhruvmanila'
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> tuple[float, float]:
if not all(isinstance(lowercase ,(float, int) ) for val in locals().values() ):
snake_case : Dict = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(lowercase )
snake_case : List[str] = ((x * distance) / (z + distance)) * scale
snake_case : Dict = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> tuple[float, float, float]:
if not isinstance(lowercase ,lowercase ):
raise TypeError("""Axis must be a str""" )
snake_case : Tuple = locals()
del input_variables["axis"]
if not all(isinstance(lowercase ,(float, int) ) for val in input_variables.values() ):
snake_case : int = (
"""Input values except axis must either be float or int: """
f"""{list(input_variables.values() )}"""
)
raise TypeError(lowercase )
snake_case : int = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
snake_case : str = x * math.cos(lowercase ) - y * math.sin(lowercase )
snake_case : List[Any] = y * math.cos(lowercase ) + x * math.sin(lowercase )
snake_case : Optional[int] = z
elif axis == "x":
snake_case : Optional[Any] = y * math.cos(lowercase ) - z * math.sin(lowercase )
snake_case : Optional[int] = z * math.cos(lowercase ) + y * math.sin(lowercase )
snake_case : Optional[int] = x
elif axis == "y":
snake_case : List[str] = x * math.cos(lowercase ) - z * math.sin(lowercase )
snake_case : Tuple = z * math.cos(lowercase ) + x * math.sin(lowercase )
snake_case : Optional[int] = y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 124
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 355
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = 42
snake_case = 42
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , )->Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
A_ : List[Any] = self.unet.config.sample_size
A_ : List[Any] = (batch_size, 3, img_size, img_size)
A_ : List[Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
A_ : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
A_ : str = self.scheduler.schedule[t]
A_ : List[str] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
A_ , A_ : List[str] = self.scheduler.add_noise_to_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
A_ : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
A_ : Dict = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
A_ : int = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
A_ : Optional[Any] = self.scheduler.step_correct(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step_output.prev_sample , step_output['''derivative'''] , )
A_ : List[Any] = step_output.prev_sample
A_ : Union[str, Any] = (sample / 2 + 0.5).clamp(0 , 1 )
A_ : List[str] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ : Dict = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
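
# Schematic of the Euler step plus second-order (Heun-style) correction used above,
# on a toy scalar ODE dx/dsigma = f(x, sigma) instead of the real score model:
def _demo_heun_step(x, sigma, sigma_prev, f):
    d = f(x, sigma)                                  # derivative at sigma
    x_euler = x + (sigma_prev - sigma) * d           # plain Euler step
    if sigma_prev == 0:
        return x_euler
    d_prev = f(x_euler, sigma_prev)                  # re-evaluate at the new point
    return x + (sigma_prev - sigma) * 0.5 * (d + d_prev)

# _demo_heun_step(1.0, 1.0, 0.5, lambda x, s: -x / s) -> 2.0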
| 65
| 0
|
'''simple docstring'''
__lowerCAmelCase = {str(digit): digit**5 for digit in range(1_0)}
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__a ) )
def UpperCAmelCase_ ():
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(__a ) )
if __name__ == "__main__":
print(solution())
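
# Spot check of the identity the search above looks for: 4150 equals the sum of the
# fifth powers of its own digits.
assert 4**5 + 1**5 + 5**5 + 0**5 == 4150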
| 271
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
for char in word:
        _a : Union[str, Any] = ord(char )
        if not _is_chinese_char(_a ):
return 0
return 1
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = set()
for token in tokens:
        _a : str = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    _a : Optional[Any] = list(word_set )
return word_list
def UpperCAmelCase_ (__a : List[str] , __a : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a : Optional[Any] = max([len(__a ) for w in chinese_word_set] )
_a : Optional[int] = bert_tokens
_a, _a : Any = 0, len(__a )
while start < end:
_a : Tuple = True
if is_chinese(bert_word[start] ):
_a : Union[str, Any] = min(end - start , __a )
for i in range(__a , 1 , -1 ):
_a : Optional[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a : Any = '##' + bert_word[j]
_a : Union[str, Any] = start + i
_a : int = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase_ (__a : List[str] , __a : LTP , __a : BertTokenizer ):
"""simple docstring"""
_a : int = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
_a : Optional[Any] = [get_chinese_word(__a ) for r in res]
ltp_res.extend(__a )
assert len(__a ) == len(__a )
_a : str = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__a , truncation=__a , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(__a ) == len(__a )
_a : List[str] = []
for input_ids, chinese_word in zip(__a , __a ):
_a : int = []
for id in input_ids:
            _a : Optional[int] = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(_a )
_a : List[str] = add_sub_symbol(__a , __a )
_a : Tuple = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__a ):
if token[:2] == "##":
_a : str = token[2:]
# save chinese tokens' pos
if len(__a ) == 1 and _is_chinese_char(ord(__a ) ):
ref_id.append(__a )
ref_ids.append(__a )
assert len(__a ) == len(__a )
return ref_ids
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.readlines()
    _a : int = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a : int = LTP(args.ltp ) # faster in GPU device
_a : Tuple = BertTokenizer.from_pretrained(args.bert )
_a : int = prepare_ref(__a , __a , __a )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_a : Optional[Any] = [json.dumps(__a ) + '\n' for ref in ref_ids]
f.writelines(__a )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
args = parser.parse_args()
main(args)
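# A self-contained illustration of `add_sub_symbol` above (hypothetical tokens,
# no LTP/BERT models needed): subword pieces of a segmented Chinese word get
# the "##" continuation prefix so the whole-word-masking collator can treat
# them as a single unit.
assert add_sub_symbol(["北", "京", "真", "好"], {"北京"}) == ["北", "##京", "真", "好"]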
| 271
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 10_24))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
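# The integration test above stores a 3x3 slice of expected activations as a
# flat list of 9 floats and indexes it with `expected[3 * ii + jj]`; the same
# row-major mapping, checked standalone:
_grid = [[r * 3 + c for c in range(3)] for r in range(3)]
_flat = [v for row in _grid for v in row]
assert all(_flat[3 * ii + jj] == _grid[ii][jj] for ii in range(3) for jj in range(3))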
| 351
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        '''simple docstring'''
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        '''simple docstring'''
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
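# Worked numbers for the `pad` arithmetic above: with pad_size=8, a 250-pixel
# side gets (250 // 8 + 1) * 8 - 250 = 6 pixels of symmetric padding, i.e. it
# is rounded up to the next multiple of 8 (a side already divisible by 8 would
# gain a full extra block of 8).
assert (250 // 8 + 1) * 8 - 250 == 6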
| 10
| 0
|
def pancake_sort(arr) -> list:
    '''simple docstring'''
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
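# A minimal sanity check of pancake_sort: each pass flips the current maximum
# to the front, then flips it into its final position at the end of the
# unsorted prefix.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []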
| 273
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
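# Quick check of the multiple-of-32 snapping used above: a 517x389 input is
# resized down to 512x384 before being encoded.
_w, _h = (x - x % 32 for x in (517, 389))
assert (_w, _h) == (512, 384)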
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 1_0_0,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
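# The `accepts_eta` introspection used in `__call__` above, shown standalone:
# probe a callable's signature for an "eta" parameter before forwarding the
# kwarg (`_fake_step` is a stand-in for illustration, not a diffusers API).
def _fake_step(model_output, timestep, sample, eta=0.0):
    return sample
assert "eta" in set(inspect.signature(_fake_step).parameters.keys())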
| 22
| 0
|
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 36
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class UpperCAmelCase_(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
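# The pipeline above applies resize -> center_crop -> rescale -> normalize in
# that order. A one-pixel check of the rescale + normalize arithmetic with the
# IMAGENET_STANDARD mean/std of 0.5: (128 * (1 / 255) - 0.5) / 0.5 is roughly
# 0.0039.
assert np.isclose((128 * (1 / 255) - 0.5) / 0.5, 0.0039, atol=1e-3)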
| 36
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 3_84, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 1_92, 3_84, 7_68],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""DPT does not use inputs_embeds""")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_depth_estimation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_raise_readout_type(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        '''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""")
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""").to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 3_84, 3_84))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00, expected_slice, atol=1e-4))
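# The init check above normalizes parameter means with
# (mean * 1e9).round() / 1e9, i.e. rounding to 9 decimal places so tiny float
# noise still compares equal to 0.0 or 1.0; the same trick on a plain float:
assert round(4e-10 * 1e9) / 1e9 == 0.0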
| 246
|
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """simple docstring"""
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"""{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"""
        return "".join(matched)
    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(jaro_winkler("""hello""", """world"""))
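# Quick sanity checks of the metric above: identical strings score 1.0 and
# fully dissimilar strings score 0.0.
assert jaro_winkler("hello", "hello") == 1.0
assert jaro_winkler("abcd", "wxyz") == 0.0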
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(
        self,
        vocab_size=6_5_0_2_4,
        hidden_size=4_5_4_4,
        num_hidden_layers=3_2,
        num_attention_heads=7_1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.0_2,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=1_1,
        eos_token_id=1_1,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        """simple docstring"""
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self):
        """simple docstring"""
        return not self.alibi
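# Worked numbers for the `head_dim` property above: with the Falcon-7B style
# defaults (hidden_size=4544, num_attention_heads=71), 4544 // 71 == 64.
assert FalconConfig().head_dim == 6_4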
| 111
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
    'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
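# The module above registers everything in `_import_structure` but defers the
# heavy imports until first attribute access. A minimal standalone sketch of
# the same idea with importlib (the concept only, not transformers' actual
# _LazyModule implementation):
import importlib
class _LazyProxy:
    def __init__(self, name):
        self._name, self._module = name, None
    def __getattr__(self, attr):
        # Import the real module lazily, on first attribute lookup.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)
_lazy_json = _LazyProxy("json")
assert _lazy_json.dumps({"a": 1}) == '{"a": 1}'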
| 111
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_6000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform):
        """simple docstring"""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (480000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
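# Worked numbers behind the extractor above: with the defaults
# (chunk_length=30 s, sampling_rate=16000 Hz, hop_length=160),
# n_samples = 30 * 16000 = 480000 and nb_max_frames = 480000 // 160 = 3000,
# which is why the attention mask is strided by hop_length down to 3000 frames.
assert 30 * 1_6000 == 48_0000 and 48_0000 // 160 == 3000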
| 55
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        """simple docstring"""
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''')
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''')
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''')
        image = dataset['''train'''][0]['''image'''].convert('''RGB''')
        inputs = image_processor(image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1E-4))
| 278
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
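# Note on the pattern above: `_LazyModule` replaces this module object in
# `sys.modules`, so the heavy torch-backed imports listed in `_import_structure`
# only run when one of those attributes (e.g. `ViTMSNModel`) is first accessed,
# keeping a plain `import transformers` fast.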
"""simple docstring"""
def A ( snake_case :list[list[int]] , snake_case :int , snake_case :int , snake_case :list[int] ) -> bool:
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def A ( snake_case :list[list[int]] , snake_case :list[int] , snake_case :int ) -> bool:
# Base Case
if curr_ind == len(snake_case ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(snake_case ) ):
if valid_connection(snake_case , snake_case , snake_case , snake_case ):
# Insert current vertex into path as next transition
__UpperCamelCase = next_ver
# Validate created path
if util_hamilton_cycle(snake_case , snake_case , curr_ind + 1 ):
return True
# Backtrack
__UpperCamelCase = -1
return False
def A ( snake_case :list[list[int]] , snake_case :int = 0 ) -> list[int]:
__UpperCamelCase = [-1] * (len(snake_case ) + 1)
# initialize start and end of path with starting index
__UpperCamelCase = __UpperCamelCase = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(snake_case , snake_case , 1 ) else []
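# Hypothetical usage sketch: the 5-vertex ring graph 0-1-2-3-4-0 has a single
# Hamiltonian cycle up to direction, which the search should find from vertex 0.
if __name__ == "__main__":
    ring = [
        [0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [1, 0, 0, 1, 0],
    ]
    print(hamilton_cycle(ring))  # [0, 1, 2, 3, 4, 0]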
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that, per column vector, only the most probable classes whose cumulative
        probability stays below `truncation_rate` are kept; the rest are zeroed out (log probability -inf).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
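# Hypothetical usage sketch (not part of the original file; assumes the public
# "microsoft/vq-diffusion-ithq" checkpoint and a CUDA device are available):
#
#     from diffusers import VQDiffusionPipeline
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
#     image.save("teddy_bear.png")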
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration class for an Informer time-series forecasting model."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
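# Hypothetical usage sketch (values are illustrative, not from the original file):
#
#     config = InformerConfig(prediction_length=24, context_length=48)
#     # With the defaults, feature_size = input_size * len(lags_sequence) + _number_of_features
#     # = 1 * 7 + 2 = 9 (the "+ 2" is the log1p(abs(loc)) and log(scale) features).
#     print(config.feature_size)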
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """Fast T5 tokenizer backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
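# Hypothetical usage sketch (requires the public "t5-small" checkpoint):
#
#     tokenizer = T5TokenizerFast.from_pretrained("t5-small")
#     ids = tokenizer("translate English to German: hello", return_tensors="pt").input_ids
#     sentinels = tokenizer.get_sentinel_tokens()  # "<extra_id_0>" ... "<extra_id_99>", unordered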
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(image_url, stream=True).raw)

    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
A : List[str] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
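# Hypothetical invocation (file names are placeholders, not from the original script):
#
#     python convert_efficientformer_checkpoint.py \
#         --pytorch_model_path efficientformer_l1_300d.pth \
#         --config_file efficientformer_l1_config.json \
#         --pytorch_dump_path efficientformer-l1-300 \
#         --no-push_to_hub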
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Compute the Levenshtein edit distance between two words with memoized
    top-down recursion.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first-word index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second-word index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
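# A quick worked example: the classic Levenshtein pair "kitten" -> "sitting"
# requires three edits (substitute k->s, substitute e->i, insert g).
#
#     min_distance_up_bottom("kitten", "sitting")  # == 3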
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : int = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
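# Hypothetical sketch (illustration only, not part of the original module): a
# concrete input stream only needs to store its source and implement `read`.
#
#     class InMemoryDictInputStream(AbstractDatasetInputStream):
#         def __init__(self, data: dict, **kwargs):
#             super().__init__(**kwargs)
#             self.data = data
#
#         def read(self):
#             return Dataset.from_dict(self.data, features=self.features)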
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
if not len(__A ) == len(__A ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
_snake_case , _snake_case , _snake_case : Dict = equationa
_snake_case , _snake_case , _snake_case : Dict = equationa
# Calculate the determinants of the matrices
_snake_case : List[Any] = aa * ba - aa * ba
_snake_case : List[str] = ca * ba - ca * ba
_snake_case : int = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_snake_case : Dict = determinant_x / determinant
_snake_case : Any = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
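# Hypothetical worked example: x + 2y = 7 and 2x - y = 4 intersect at (3, 2).
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 7], [2, -1, 4]))  # (3.0, 2.0)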
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : int = ['input_values', 'attention_mask']
def __init__(self : Any , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_6_0_0_0 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 8_0 , __UpperCAmelCase : int = 1_6 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : str = "hann_window" , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 8_0 , __UpperCAmelCase : float = 7_6_0_0 , __UpperCAmelCase : float = 1E-10 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = return_attention_mask
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = hop_length
UpperCAmelCase__ = win_length
UpperCAmelCase__ = win_function
UpperCAmelCase__ = frame_signal_scale
UpperCAmelCase__ = fmin
UpperCAmelCase__ = fmax
UpperCAmelCase__ = mel_floor
UpperCAmelCase__ = reduction_factor
UpperCAmelCase__ = win_length * sampling_rate // 1_0_0_0
UpperCAmelCase__ = hop_length * sampling_rate // 1_0_0_0
UpperCAmelCase__ = optimal_fft_length(self.sample_size )
UpperCAmelCase__ = (self.n_fft // 2) + 1
UpperCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase )
UpperCAmelCase__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase_ (__UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
UpperCAmelCase__ = np.array(__UpperCAmelCase , np.intaa )
UpperCAmelCase__ = []
for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ):
UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
UpperCAmelCase__ = padding_value
normed_input_values.append(__UpperCAmelCase )
else:
UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowercase_ (self : Optional[int] , __UpperCAmelCase : np.ndarray , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase__ = spectrogram(
__UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__(self : Any , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : str , ) -> BatchFeature:
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
UpperCAmelCase__ = self._process_audio(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
else:
UpperCAmelCase__ = None
if audio_target is not None:
UpperCAmelCase__ = self._process_audio(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
UpperCAmelCase__ = inputs_target["input_values"]
UpperCAmelCase__ = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCAmelCase__ = decoder_attention_mask
return inputs
def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : bool = False , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Any , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase__ = isinstance(__UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCAmelCase__ = is_batched_numpy or (
isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
UpperCAmelCase__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
elif isinstance(__UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ = speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ = [speech]
# needed to make pad() work on spectrogram inputs
UpperCAmelCase__ = self.feature_size
# convert into correct format for padding
if is_target:
UpperCAmelCase__ = [self._extract_mel_features(__UpperCAmelCase ) for waveform in speech]
UpperCAmelCase__ = BatchFeature({"input_values": features} )
UpperCAmelCase__ = self.num_mel_bins
else:
UpperCAmelCase__ = BatchFeature({"input_values": speech} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs["input_values"] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs["input_values"] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs["input_values"] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=attention_mask , padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict(self ) -> Dict[str, Any]:
        """simple docstring"""
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 65
| 0
|
def solution( length : int = 50 ) -> int:
    # Count the ways to tile a row of `length` units with blocks of length >= 3
    # separated by at least one empty unit.
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__UpperCAmelCase = random.Random()
def __lowerCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any]=1.0 , __magic_name__ : Any=None , __magic_name__ : int=None ):
if rng is None:
a__: str =global_rng
a__: Dict =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : str , _a : Any , _a : Union[str, Any]=7 , _a : Any=4_0_0 , _a : Optional[int]=2_0_0_0 , _a : Tuple=1 , _a : str=0.0 , _a : Optional[Any]=1_6_0_0_0 , _a : List[str]=True , _a : Dict=8_0 , _a : List[str]=1_6 , _a : Union[str, Any]=6_4 , _a : Any="hann_window" , _a : Dict=8_0 , _a : int=7_6_0_0 , _a : List[str]=1e-10 , _a : int=True , ):
a__: Tuple =parent
a__: Optional[int] =batch_size
a__: Dict =min_seq_length
a__: Any =max_seq_length
a__: Dict =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: str =feature_size
a__: List[Any] =padding_value
a__: Union[str, Any] =sampling_rate
a__: List[str] =do_normalize
a__: Any =num_mel_bins
a__: Dict =hop_length
a__: Optional[int] =win_length
a__: List[Any] =win_function
a__: int =fmin
a__: List[str] =fmax
a__: List[str] =mel_floor
a__: str =return_attention_mask
def _lowerCamelCase ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowerCamelCase ( self : str , _a : List[str]=False , _a : Optional[Any]=False ):
def _flatten(_a : List[str] ):
return list(itertools.chain(*_a ) )
if equal_length:
a__: int =floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a__: Any =[
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__: Optional[int] =[np.asarray(_a ) for x in speech_inputs]
return speech_inputs
def _lowerCamelCase ( self : int , _a : Union[str, Any]=False , _a : str=False ):
if equal_length:
a__: Union[str, Any] =[floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a__: Optional[Any] =[
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__: Optional[int] =[np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase__ ( _a , unittest.TestCase ):
_lowerCAmelCase = SpeechTaFeatureExtractor
def _lowerCamelCase ( self : Optional[Any] ):
a__: List[str] =SpeechTaFeatureExtractionTester(self )
def _lowerCamelCase ( self : Optional[int] , _a : Union[str, Any] ):
self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCamelCase ( self : Union[str, Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__: Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__: Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Union[str, Any] =[np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
a__: Union[str, Any] =feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a__: List[Any] =feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
a__: Tuple =feat_extract(_a , return_tensors="np" ).input_values
a__: int =feat_extract(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def _lowerCamelCase ( self : int ):
a__: Optional[int] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Tuple =["longest", "max_length", "do_not_pad"]
a__: List[str] =[None, 1_6_0_0, None]
for max_length, padding in zip(_a , _a ):
a__: Optional[int] =feat_extract(_a , padding=_a , max_length=_a , return_tensors="np" )
a__: Optional[int] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowerCamelCase ( self : Any ):
a__: List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Tuple =range(8_0_0 , 1_4_0_0 , 2_0_0 )
a__: List[Any] =[floats_list((1, x) )[0] for x in lengths]
a__: Optional[int] =["longest", "max_length", "do_not_pad"]
a__: Optional[Any] =[None, 1_6_0_0, None]
for max_length, padding in zip(_a , _a ):
a__: List[Any] =feat_extract(_a , max_length=_a , padding=_a )
a__: Optional[Any] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowerCamelCase ( self : str ):
a__: List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: str =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: int =feat_extract(
_a , truncation=_a , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
a__: Any =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCamelCase ( self : Optional[int] ):
a__: Optional[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Dict =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: int =feat_extract(
_a , truncation=_a , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
a__: int =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
a__: int =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Tuple =feat_extract(
_a , truncation=_a , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
a__: Optional[int] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowerCamelCase ( self : Any ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _lowerCamelCase ( self : Any ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__: int =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__: Optional[int] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Any =[np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
a__: Dict =feature_extractor(audio_target=_a , padding=_a , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a__: Tuple =feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a__: str =feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
a__: List[Any] =feature_extractor(_a , return_tensors="np" ).input_values
a__: Union[str, Any] =feature_extractor(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
a__: Optional[int] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
a__: str =np.asarray(_a )
a__: int =feature_extractor(_a , return_tensors="np" ).input_values
a__: int =feature_extractor(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def _lowerCamelCase ( self : str ):
a__: str =self.feat_extract_tester.prepare_inputs_for_target()
a__: Dict =self.feature_extraction_class(**self.feat_extract_dict )
a__: int =feat_extract.model_input_names[0]
a__: Optional[Any] =BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
a__: Any =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
a__: List[str] =BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a__: List[Any] =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__: Tuple =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
a__: Tuple =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
a__: int =self.feature_extraction_class(**self.feat_extract_dict )
a__: Tuple =feat_extract.model_input_names[0]
a__: Optional[int] =BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a__: Optional[int] =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__: Optional[Any] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self : str ):
a__: List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
a__: Tuple =self.feat_extract_tester.prepare_inputs_for_target()
a__: Optional[int] =feat_extract.model_input_names[0]
a__: Optional[int] =BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack: pad() validates feature_size against the last input dimension
a__: int =feat_extract.pad(_a , padding="longest" , return_tensors="np" )[input_name]
a__: Dict =feat_extract.pad(_a , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def _lowerCamelCase ( self : List[str] ):
a__: Dict =self.feat_extract_dict
a__: Optional[Any] =True
a__: Any =self.feature_extraction_class(**_a )
a__: List[str] =self.feat_extract_tester.prepare_inputs_for_target()
        a__: Optional[Any] =[len(x ) for x in speech_inputs]
a__: Any =feat_extract.model_input_names[0]
a__: Union[str, Any] =BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack: pad() validates feature_size against the last input dimension
a__: Any =feat_extract.pad(_a , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , _a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _a )
def _lowerCamelCase ( self : Any ):
a__: Tuple =self.feat_extract_dict
a__: Dict =True
a__: Tuple =self.feature_extraction_class(**_a )
a__: str =self.feat_extract_tester.prepare_inputs_for_target()
        a__: List[str] =[len(x ) for x in speech_inputs]
a__: Optional[int] =feat_extract.model_input_names[0]
a__: Optional[Any] =BatchFeature({input_name: speech_inputs} )
a__: List[str] =min(_a )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack: pad() validates feature_size against the last input dimension
a__: Optional[Any] =feat_extract.pad(
_a , padding="max_length" , max_length=_a , truncation=_a , return_tensors="np" )
self.assertIn("attention_mask" , _a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self : Tuple ):
# fmt: off
a__: Optional[Any] =torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
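        # Integration check: the first 30 processed waveform samples must match the
        # hard-coded reference values above.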
a__: Tuple =self._load_datasamples(1 )
a__: Tuple =SpeechTaFeatureExtractor()
a__: Any =feature_extractor(_a , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _a , atol=1e-6 ) )
def _lowerCamelCase ( self : int ):
# fmt: off
a__: List[Any] =torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
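        # Integration check: the first 30 mel bins of the first target frame must match the
        # hard-coded reference values above.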
a__: List[Any] =self._load_datasamples(1 )
a__: List[str] =SpeechTaFeatureExtractor()
a__: Any =feature_extractor(audio_target=_a , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _a , atol=1e-4 ) )
| 42
| 1
|
from math import pi, sqrt
def gamma( num : float ) -> float:
    """Gamma of positive integers and half-integers via the recurrence
    gamma(n) = (n - 1) * gamma(n - 1), with gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError('math domain error' )
    if num > 171.5:
        raise OverflowError('math range error' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )


def test_gamma() -> None:
    """simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f'''gamma({num}) = {gamma(num)}''')
        print("\nEnter 0 to exit...")
| 90
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self , **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts(self , tokenizer):
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 10
| 0
|
from __future__ import annotations
from math import pi
def ind_reactance( inductance : float , frequency : float , reactance : float ) -> dict[str, float]:
    '''Solve X_L = 2 * pi * f * L for whichever of inductance (henries), frequency (hertz),
    or inductive reactance (ohms) is passed as zero, and return it in a dict.'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''transfo-xl'''
UpperCamelCase = ['''mems''']
UpperCamelCase = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
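    # The attribute_map above lets standard names like `hidden_size` resolve to the
    # TransfoXL-specific fields (`d_model`, `n_head`, `n_layer`, `vocab_size`).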
    def __init__( self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ) -> int:
        """simple docstring"""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
        """simple docstring"""
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 208
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
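# The tester class below builds a tiny random Flaubert configuration and matching inputs
# that are shared by the per-head `create_and_check_*` methods further down.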
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=True, __a=False, __a=False, __a=False, __a=2, __a=99, __a=0, __a=32, __a=5, __a=4, __a=0.1, __a=0.1, __a=512, __a=12, __a=2, __a=0.02, __a=3, __a=4, __a="last", __a=None, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Union[str, Any] = use_input_lengths
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Dict = use_labels
_lowerCAmelCase : Optional[Any] = gelu_activation
_lowerCAmelCase : List[str] = sinusoidal_embeddings
_lowerCAmelCase : Dict = causal
_lowerCAmelCase : Union[str, Any] = asm
_lowerCAmelCase : str = n_langs
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = n_special
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[Any] = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : Tuple = summary_type
_lowerCAmelCase : Union[str, Any] = use_proj
_lowerCAmelCase : Tuple = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : str = None
if self.use_input_lengths:
_lowerCAmelCase : str = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_lowerCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], 2).float()
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Union[str, Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case__ ( self):
'''simple docstring'''
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FlaubertModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = model(__a, lengths=__a, langs=__a)
_lowerCAmelCase : Optional[Any] = model(__a, langs=__a)
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = FlaubertWithLMHeadModel(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : List[Any] = model(__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = FlaubertForQuestionAnsweringSimple(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Union[str, Any] = model(__a)
_lowerCAmelCase : List[str] = model(__a, start_positions=__a, end_positions=__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = FlaubertForQuestionAnswering(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : str = model(
__a, start_positions=__a, end_positions=__a, cls_index=__a, is_impossible=__a, p_mask=__a, )
_lowerCAmelCase : str = model(
__a, start_positions=__a, end_positions=__a, cls_index=__a, is_impossible=__a, )
((_lowerCAmelCase) , ) : List[Any] = result_with_labels.to_tuple()
_lowerCAmelCase : List[Any] = model(__a, start_positions=__a, end_positions=__a)
((_lowerCAmelCase) , ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : int = FlaubertForSequenceClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[int] = model(__a)
_lowerCAmelCase : List[Any] = model(__a, labels=__a)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Any = FlaubertForTokenClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Any = model(__a, attention_mask=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_choices
_lowerCAmelCase : str = FlaubertForMultipleChoice(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowerCAmelCase : Any = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowerCAmelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowerCAmelCase : Tuple = model(
__a, attention_mask=__a, token_type_ids=__a, labels=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : str = super()._prepare_for_class(__a, __a, return_labels=__a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_lowerCAmelCase : List[str] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=__a)
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=__a)
return inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FlaubertModelTester(self)
_lowerCAmelCase : List[str] = ConfigTester(self, config_class=__a, emb_dim=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = FlaubertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@slow
@require_torch_gpu
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : List[Any] = model_class(config=__a)
_lowerCAmelCase : Dict = self._prepare_for_class(__a, __a)
_lowerCAmelCase : Any = torch.jit.trace(
__a, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a, os.path.join(__a, "traced_model.pt"))
_lowerCAmelCase : int = torch.jit.load(os.path.join(__a, "traced_model.pt"), map_location=__a)
loaded(inputs_dict["input_ids"].to(__a), inputs_dict["attention_mask"].to(__a))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
_lowerCAmelCase : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(__a)[0]
_lowerCAmelCase : Dict = torch.Size((1, 11, 768))
self.assertEqual(output.shape, __a)
_lowerCAmelCase : str = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3], __a, atol=1E-4))
| 36
|
def gray_code( bit_count : int ) -> list[int]:
    '''Return the Gray code sequence for `bit_count` bits as a list of integers.'''
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence


def gray_code_sequence_string( bit_count : int ) -> list[str]:
    '''Recursively build the `bit_count`-bit Gray code as binary strings.'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36
| 1
|
def solution( power : int = 1_000 ) -> int:
    '''Return the sum of the decimal digits of 2**power.'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 143
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
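# Each parametrized case pairs a compression format with a fixture file and the extractor
# expected to handle it; formats whose optional backend is missing are skipped at runtime.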
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def test_base_extractors(
    compression_format, is_archive, bza_file, gz_file, lza_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    '''simple docstring'''
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def test_extractor(
    compression_format, is_archive, bza_file, gz_file, lza_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    '''simple docstring'''
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot( tmp_path, text_file ):
    '''Tar archive whose member path escapes the extraction directory via "..".'''
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w" ) as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    '''Tar archive containing a symlink that points outside the extraction directory.'''
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True )
    with tarfile.TarFile(path, "w" ) as f:
        f.add(str(directory / "subdir" ), arcname="subdir" )  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], )
def test_extract_insecure_tar_files( insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog ):
    '''Extraction must refuse tar members that would escape the output directory.'''
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(path, output_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive( tmpdir ):
    '''`zipfile.is_zipfile` is fooled by this PNG payload; `ZipExtractor.is_extractable` must not be.'''
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb" ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
| 143
| 1
|
'''simple docstring'''
import sys
def matrix_chain_order( array ):
    """Dynamic programming: minimal scalar multiplications needed to multiply the chain
    of matrices whose dimensions are given by `array`, plus the split-point table."""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
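# Sanity check for the reconstruction above: for dimensions [10, 20, 30] (a 10x20 matrix
# times a 20x30 matrix), matrix_chain_order reports a minimal cost matrix[1][2] of
# 10 * 20 * 30 = 6000 for the single product A1 x A2.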
def print_optimal_solution( optimal_solution , i , j ):
    """Print the optimal parenthesization recorded in `optimal_solution`."""
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print('No. of operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
| 166
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve( limit ):
    """Return all primes below `limit` via a sieve of Eratosthenes over the odd numbers."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( ceiling = 1_000_000 ):
    """Find the prime below `ceiling` that is the sum of the longest run of consecutive primes."""
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
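# Quick check, assuming the reconstruction above (cf. Project Euler 50): solution(100) == 41,
# since 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest consecutive-prime sum below 100.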
if __name__ == "__main__":
print(f"{solution() = }")
| 166
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __lowercase (snake_case__ , snake_case__ ):
"""simple docstring"""
_UpperCAmelCase = """convnextv2"""
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=2_2_4 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 365
|
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )  # create a TCP socket
    host = socket.gethostname()
    port = 1_2_3_1_2
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            data = sock.recv(1_0_2_4 )
            if not data:
                break
            out_file.write(data )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
main()
| 162
| 0
|
def xnor_gate( input_a : int , input_b : int ) -> int:
    '''XNOR outputs 1 when both inputs are equal, otherwise 0.'''
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
    '''simple docstring'''
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
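# XNOR(a, b) is the negation of XOR: equivalently `1 - (input_a ^ input_b)` for 0/1 inputs.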
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 328
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ) -> float:
    '''Approximate the integral of `fnc` over [x_start, x_end] using `steps` trapezoids.'''
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1 ) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":

    def f( x : float ) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 1_0
    while i <= 1_0_0_0_0_0:
        print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 1_0
| 328
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
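# Entries below are appended to the lazy import structure only when their optional
# backends (tokenizers, torch, tf) are importable; otherwise they are silently skipped.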
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 226
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = """longformer"""
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1e-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class _A ( _a ):
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : "PretrainedConfig" , __UpperCAmelCase : str = "default" , __UpperCAmelCase : "List[PatchingSpec]" = None):
super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = True
@property
def __snake_case ( self : int):
if self.task == "multiple-choice":
a : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
a : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
])
@property
def __snake_case ( self : Union[str, Any]):
a : str = super().outputs
if self.task == "default":
a : Tuple = {0: "batch"}
return outputs
@property
def __snake_case ( self : Optional[int]):
return 1e-4
@property
def __snake_case ( self : str):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def __snake_case ( self : List[str] , __UpperCAmelCase : "PreTrainedTokenizerBase" , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
a : str = super().generate_dummy_inputs(
preprocessor=__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
a : Union[str, Any] = torch.zeros_like(inputs["input_ids"])
# make every second token global
a : Dict = 1
return inputs
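def _demo_longformer_onnx_config():
    # Added illustration (hedged): builds the config defined above and checks the
    # ONNX surface it declares. Only meaningful inside the transformers package
    # because of the relative imports at the top of this module.
    config = LongformerConfig(attention_window=256, max_position_embeddings=4096)
    onnx_config = LongformerOnnxConfig(config)
    assert "global_attention_mask" in onnx_config.inputs
    assert onnx_config.default_onnx_opset >= 14  # tril needs opset >= 14
    return onnx_config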
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the self-avoiding paths from the top-left to the bottom-right cell,
    moving in the four cardinal directions; cells containing 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
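# Added sanity check (illustrative): an open 2x2 grid has exactly two
# self-avoiding paths from the top-left to the bottom-right corner.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2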
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original OpenAI GPT TensorFlow checkpoint to a PyTorch model."""
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
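# Example invocation (added for illustration; script name and paths are
# hypothetical and must point at a real OpenAI GPT TF checkpoint):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pt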
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure from the ideal gas law PV = nRT, in pascals."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume from the ideal gas law PV = nRT, in cubic metres."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
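# Worked example (added): one mole at 300 K in 0.0224 m^3 exerts roughly
# 111 kPa, a little above one standard atmosphere.
assert 111_000 < pressure_of_gas_system(1.0, 300.0, 0.0224) < 112_000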
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs) -> CLIPTokenizer:
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> CLIPTokenizerFast:
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs) -> ViTImageProcessor:
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : int =self.get_tokenizer()
A__ : Optional[int] =self.get_rust_tokenizer()
A__ : Any =self.get_image_processor()
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
A__ : Dict =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_ )
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
A__ : Optional[int] =CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : List[str] =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ : List[Any] =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ : Union[str, Any] =self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
A__ : Optional[int] =CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
A__ : Optional[Any] =self.get_image_processor()
A__ : int =self.get_tokenizer()
A__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Dict =self.prepare_image_inputs()
A__ : List[Any] =image_processor(lowerCAmelCase_ , return_tensors="""np""" )
A__ : List[Any] =processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Any =self.get_image_processor()
A__ : Optional[Any] =self.get_tokenizer()
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Any ="""lower newer"""
A__ : Optional[int] =processor(text=lowerCAmelCase_ )
A__ : Optional[int] =tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =self.get_image_processor()
A__ : Optional[Any] =self.get_tokenizer()
A__ : Union[str, Any] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Optional[Any] ="""lower newer"""
A__ : List[str] =self.prepare_image_inputs()
A__ : Optional[Any] =processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.get_image_processor()
A__ : List[str] =self.get_tokenizer()
A__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Tuple =self.prepare_image_inputs()
A__ : str =self.prepare_image_inputs()
A__ : int =processor(images=lowerCAmelCase_ , visual_prompt=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.get_image_processor()
A__ : Optional[int] =self.get_tokenizer()
A__ : Any =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : List[Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : Tuple =processor.batch_decode(lowerCAmelCase_ )
A__ : List[Any] =tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
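def _demo_clipseg_processor_from_hub():
    # Added sketch (hedged): the same tokenizer/image-processor pairing fetched
    # from the Hub; needs network access and the public CIDAS/clipseg-rd64-refined
    # checkpoint, so treat this as illustrative rather than part of the test suite.
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    encoding = processor(text="a cat", return_tensors="pt")
    return sorted(encoding.keys())  # ['attention_mask', 'input_ids']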
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct: dict):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
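def _demo_torch_formatting():
    # Hedged demo (added): exercises the formatter above indirectly through the
    # public `datasets` API; requires the full package to be importable. The
    # sample column values are made up for illustration.
    import datasets

    ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]})
    ds = ds.with_format("torch")
    return ds[0]["x"].dtype, ds[0]["y"].dtype  # torch.int64, torch.float32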
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
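def _demo_get_imports(tmp_dir: str = ".") -> None:
    # Added illustration (hedged): `get_imports` can also be called directly on
    # any module path; the file name here is hypothetical and written on the fly.
    demo_path = os.path.join(tmp_dir, "_demo_imports.py")
    with open(demo_path, "w") as f:
        f.write("import os\nimport json\n")
    print(sorted(get_imports(demo_path)))  # ['json', 'os']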
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Number of ways `n` pence can be made from UK coins (coin-sums problem)."""
    return two_pound(n)
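# Added sanity check (illustrative): with only 1p and 2p coins there are
# exactly three ways to make 5p, which the recursion above reproduces.
assert two_pence(5) == 3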
if __name__ == "__main__":
print(solution(int(input().strip())))
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
a_ = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
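def _demo_build_hans_dataset(data_dir: str = "path/to/hans"):  # hypothetical path
    # Added sketch (hedged): builds the torch dataset defined above; requires the
    # HANS tsv files on disk and a tokenizer checkpoint, so it is illustrative only.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(data_dir, tokenizer, task="hans", max_seq_length=128)
    return len(dataset), dataset.get_labels()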
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a depth map for an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
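def _demo_depth_pipeline(image_path: str = "example.jpg"):  # hypothetical image
    # Added sketch (hedged): the canonical way to drive this pipeline through the
    # `pipeline()` factory; uses the public Intel/dpt-large checkpoint and needs
    # network access plus a real image file.
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator(image_path)
    return result["depth"], result["predicted_depth"].shape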
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : int = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
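def _demo_openai_gpt_config():
    # Added illustration: the attribute_map above lets generic names resolve to
    # the GPT-style names, e.g. `hidden_size` reads through to `n_embd`.
    config = OpenAIGPTConfig(n_embd=512, n_layer=6)
    assert config.hidden_size == 512 and config.num_hidden_layers == 6
    return config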
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Optional[int]:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
A_ = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_SCREAMING_SNAKE_CASE )
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE )
A_ = outputs.predicted_depth
# verify the predicted depth
A_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase : Union[str, Any] = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
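def _demo_rag_tokenizer():
    # Added sketch (hedged): loads both sub-tokenizers from the public
    # facebook/rag-token-nq checkpoint and encodes a question with the current
    # (question-encoder) tokenizer; requires network access.
    tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    return tok("who wrote hamlet?", return_tensors="pt")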
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition (FSNER) model."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each support token as a potential entity start/end for the query."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with unit tiles
    mixed with tiles of length two, three and four (cf. Project Euler 117)."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
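# Added sanity check (illustrative): for a row of length five the recurrence
# above yields exactly 15 tilings.
assert solution(5) == 15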
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Union[str, Any] = logging.get_logger(__name__)
class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
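def _demo_preprocess_random_image():
    # Added sketch (hedged): runs the processor defined above (class name
    # reconstructed, so treat it as an assumption) on one random HxWxC uint8
    # image; the defaults yield a (1, 3, 224, 224) batch.
    image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    processor = EfficientFormerImageProcessor()
    batch = processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape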
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def parse_bool(string):
    '''Parse "True"/"False" command-line strings into booleans.'''
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f'could not parse string as bool {string}')
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
__snake_case = parser.parse_args()
__snake_case = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
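# Illustrative invocation (the script, checkpoint, and YAML file names below are
# placeholders, not assets shipped with this code):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./sd-controlnet-canny \
#       --to_safetensors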
| 176
| 0
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
__lowerCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( FeatureExtractionMixin ):
    """simple docstring"""
    def __init__( self : str , **kwargs : Optional[Any] ):
        requires_backends(self , ["bs4"] )
        super().__init__(**kwargs )
    def xpath_soup( self : Union[str, Any] , element : Dict ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self : Optional[int] , html_string : List[Any] ):
        html_code = BeautifulSoup(html_string , "html.parser" )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError("Number of doc strings and xtags does not correspond" )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError("Number of doc strings and xsubs does not correspond" )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self : Optional[int] , xpath_tags : List[str] , xpath_subscripts : int ):
        xpath = ""
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += f'''/{tagname}'''
            if subs != 0:
                xpath += f'''[{subs}]'''
        return xpath
    def __call__( self : Optional[int] , html_strings : Optional[Any] ) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f'''but is of type {type(html_strings )}.''' )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
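# Minimal usage sketch (the class above mirrors transformers'
# MarkupLMFeatureExtractor with an obfuscated class name; `bs4` must be installed):
#   extractor = SCREAMING_SNAKE_CASE__()
#   encoding = extractor("<html><body><p>Hello</p></body></html>")
#   encoding["nodes"]   # -> [["Hello"]]
#   encoding["xpaths"]  # -> [["/html/body/p"]]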
| 286
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 286
| 1
|
'''simple docstring'''
import re
def split_input(str_ ):
    """simple docstring"""
    return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def to_simple_case(str_ ):
    """simple docstring"""
    string_split = split_input(str_ )
    return "".join(
        ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text , upper , separator ):
    """simple docstring"""
    try:
        string_split = split_input(text )
        if upper:
            res_str = """""".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = """""".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text ):
    """simple docstring"""
    return to_simple_case(text )
def to_camel_case(text ):
    """simple docstring"""
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text , upper ):
    """simple docstring"""
    return to_complex_case(text , upper , """_""" )
def to_kebab_case(text , upper ):
    """simple docstring"""
    return to_complex_case(text , upper , """-""" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 323
|
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( Scene ):
    """simple docstring"""
    def construct( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323
| 1
|
"""simple docstring"""
class Graph :
    '''simple docstring'''
    def __init__( self : Optional[Any] )-> Tuple:
        self.vertex = {}
    def print_graph( self : Union[str, Any] )-> None:
        print(self.vertex )
        for i in self.vertex:
            print(i, ''' -> ''', ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self : Dict, from_vertex : int, to_vertex : int )-> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self : List[Any] )-> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i, visited )
    def dfs_recursive( self : str, start_vertex : int, visited : list )-> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
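# For comparison, a minimal sketch of the same traversal using an explicit stack
# instead of recursion (assumes the `Graph` class above and integer vertices
# 0..n-1, as in the demo):
def dfs_iterative(graph ):
    visited = [False] * len(graph.vertex )
    for start in range(len(graph.vertex ) ):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            v = stack.pop()
            if visited[v]:
                continue
            visited[v] = True
            print(v, end=''' ''' )
            # push neighbours in reverse so they pop in insertion order
            stack.extend(reversed(graph.vertex.get(v, [] ) ) )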
| 272
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self : Union[str, Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny( self : str )-> Any:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''', from_pt=True, dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng, jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_pose( self : Optional[int] )-> Optional[int]:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''', from_pt=True, dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng, jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
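# The replicate/shard pattern used above, in isolation (sketch; shapes are
# illustrative): parameters are copied to every device with `replicate`, while
# per-example inputs get a leading device axis from `shard`:
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   p_params = replicate(params)          # same pytree on every device
#   prompt_ids = shard(prompt_ids)        # (num_devices, batch_per_device, seq_len)
#   images = pipe(..., params=p_params, prng_seed=rng, jit=True).images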
| 272
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
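# Newton's update is x_{n+1} = x_n - f(x_n) / f'(x_n). A variant (sketch) that
# avoids re-`eval`-ing the expression string on every iteration compiles the
# function and its derivative once with sympy.lambdify; `newton_raphson_compiled`
# is a hypothetical helper name, not part of the original module:
def newton_raphson_compiled(expr : str , x0 : float , precision : float = 10**-10 ) -> float:
    from sympy import lambdify, symbols, sympify

    x_sym = symbols("x" )
    f = lambdify(x_sym , sympify(expr ) )                        # callable f(x)
    f_prime = lambdify(x_sym , diff(sympify(expr ) , x_sym ) )   # callable f'(x)
    x = x0
    while abs(f(x ) ) >= precision:
        x = x - f(x ) / f_prime(x )
    return float(x )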
| 300
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['''input_features''', '''attention_mask''']
    def __init__( self , feature_size=80 , sampling_rate=16_000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform: np.array ) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="log" , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        '''simple docstring'''
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ):
        '''simple docstring'''
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Union[bool, str, PaddingStrategy] = False , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
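# Minimal usage sketch (the class above is transformers' M-CTC-T feature
# extractor with an obfuscated class name; audio is expected as 16 kHz mono):
#   extractor = __magic_name__()
#   inputs = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#   inputs["input_features"].shape   # (1, num_frames, 80) mel-filterbank features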
| 300
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class _UpperCamelCase ( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = """text"""
    @property
    def column_mapping( self : Any):
'''simple docstring'''
return {self.text_column: "text"}
| 359
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """spiece.model"""}
lowerCamelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
lowerCamelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
lowerCamelCase = 0
lowerCamelCase = 1
lowerCamelCase = 2
lowerCamelCase = 3
lowerCamelCase = 4
class _UpperCamelCase ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size( self : str):
        '''simple docstring'''
        return len(self.sp_model)
    def get_vocab( self : Any):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self : str):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self : List[Any] , d : List[str]):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self : Union[str, Any] , inputs : Any):
        '''simple docstring'''
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"').replace('\'\'' , '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self : List[Any] , text : str):
        '''simple docstring'''
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self : Any , token : Optional[int]):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self : Tuple , index : List[str]):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self : Any , tokens : Tuple):
        '''simple docstring'''
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer' , False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
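# Usage sketch (the class above is transformers' XLNet tokenizer with an
# obfuscated class name; the spiece.model path is a placeholder):
#   tok = _UpperCamelCase(vocab_file="spiece.model")
#   ids = tok("Hello world")["input_ids"]   # sequence ends with <sep>, <cls>
#   tok.decode(ids)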
| 48
| 0
|
'''simple docstring'''
def A_ ( num ):
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
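# Example behaviour (illustrative):
#   A_(121)  -> True
#   A_(123)  -> False
#   A_(-101) -> False   (negative numbers are rejected up front)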
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self : Any ) ->str:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self : Optional[Any] , tokenizer : Optional[Any] ) ->Dict:
        """simple docstring"""
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text
    def get_clean_sequence( self : str , tokenizer : int , with_prefix_space : Any=False , max_length : Tuple=20 , min_length : Dict=5 ) ->List[Any]:
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_convert_token_and_id( self : Union[str, Any] ) ->Optional[Any]:
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self : int ) ->str:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-4] , """œ""" )
        self.assertEqual(vocab_keys[-2] , """<mask>""" )
        self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
        self.assertEqual(len(vocab_keys ) , 81 )
    def test_vocab_size( self : Optional[int] ) ->int:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
| 347
| 0
|
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self ) -> List[str]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> Dict:
        model = TFDebertaV2Model(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> int:
        model = TFDebertaV2ForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> int:
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> Any:
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())) -> None:  # noqa: B008
        """These parameters are saved and used when next_number() is called."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """The smallest number generated is 0; the largest is modulo - 1."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n    title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n    author = \"Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John\",\n    booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n    month = aug # \" 8-12\",\n    year = \"2006\",\n    address = \"Cambridge, Massachusetts, USA\",\n    publisher = \"Association for Machine Translation in the Americas\",\n    url = \"https://aclanthology.org/2006.amta-papers.25\",\n    pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n    title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n    author = \"Post, Matt\",\n    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n    month = oct,\n    year = \"2018\",\n    address = \"Belgium, Brussels\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W18-6319\",\n    pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n    'num_edits' (int): The cumulative number of edits\n    'ref_length' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\",\n        ...                     \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...             [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n    Example 2:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n    Example 3:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n    Example 4:\n        >>> predictions = [\"does this sentence match??\",\n        ...                     \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n    Example 5:\n        >>> predictions = [\"does this sentence match??\",\n        ...                    \"what about this sentence?\",\n        ...                    \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...             [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...             [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
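# A minimal usage sketch. It assumes the HashTable base class imported from
# .hash_table exposes the usual (size_table, charge_factor=...) constructor and
# an insert_data() method, as in its common implementation; adjust if yours differs.
#
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 20, 30):
#       ht.insert_data(value)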
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (and its compiled wrapper)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk; safe to use on TPU and in multi-process setups."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function or an object."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge two dictionaries, writing the merged values into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost` (defaults to the torch.distributed port, 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
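# A small illustration of patch_environment (values are arbitrary): keys are
# upper-cased on entry and removed again when the block exits.
#
#   with patch_environment(my_flag="1"):
#       assert os.environ["MY_FLAG"] == "1"
#   assert "MY_FLAG" not in os.environ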
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n) -> float:
    """Solve the fractional knapsack problem greedily by value/weight ratio."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
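# Worked example for the partial-item branch: with capacity w=50 and items of
# value/weight (60/10, 100/20, 120/30), the first two items fit whole (value
# 160) and 20 of the third item's 30 units add 80, giving a total of 240.0:
#
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0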
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
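# Illustrative usage (downloads the vinai/bartpho-syllable checkpoint named in
# the maps above; the example sentence is arbitrary Vietnamese text):
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   tokenizer.tokenize("Chúng tôi là những nghiên cứu viên.")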
def rank_of_matrix(matrix) -> int:
    """Finds the rank of a matrix by Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
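# Quick check (illustrative input): the second row is twice the first, so the
# elimination zeroes it out and the rank drops to 1.
#
#   rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])  # -> 1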
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast tokenizer class for BlenderbotSmall, backed by a byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
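# Illustrative usage (the checkpoint name comes from the maps above; the input
# text is arbitrary):
#
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   tokenizer("sam is a great name")["input_ids"]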
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Implement sin in Python using its Maclaurin (Taylor) series expansion."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
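# Quick check (illustrative): sin(90) evaluates the Maclaurin series at pi/2
# and returns approximately 1.0 after rounding to 10 decimal places.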
if __name__ == "__main__":
__import__('''doctest''').testmod()
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    """Mixin of round-trip serialization tests for feature extractors."""

    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
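# This mixin is meant to be combined with a concrete test case that provides
# `feature_extraction_class` and `feat_extract_dict`. The names below are
# hypothetical, for illustration only:
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {"feature_size": 1}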
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of data around the pivot value."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would be at `index` in the sorted list."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
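# Quick check (illustrative data): the 0-indexed 2nd smallest element of the
# list below is 5, since sorted it reads [2, 4, 5, 7, 32, 54, 899].
#
#   quick_select([2, 4, 5, 7, 899, 54, 32], 2)  # -> 5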
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0,
        use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False,
        huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None,
        answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False,
        select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None,
        no_aggregation_label_index=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
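# Illustrative usage (values are hypothetical, not a released fine-tuning
# recipe): a default configuration tweaked for weakly supervised aggregation.
#
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   config.num_aggregation_labels  # -> 4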
'''simple docstring'''
import numpy as np
class Cell:
    """A cell in the world, with a position, a parent and the costs g, h and f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of a cell."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    """Check whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
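# Quick checks (illustrative): is_prime(11) is True, and solution(6) == 13,
# since the first six primes are 2, 3, 5, 7, 11 and 13.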
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    # The context manager's name here is our choice; it simply wraps the
    # hide_cursor()/show_cursor() pair so the cursor is restored on exit.
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert between volume units, using cubic meters as the intermediate."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
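# Worked example: 4 cubic meters in litres (1 m^3 = 1000 L).
#
#   volume_conversion(4, "cubicmeter", "litre")  # -> 4000.0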
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal position/timestep embeddings for a 1-d array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
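# A quick shape check (illustrative): embedding 4 timesteps into 32 dimensions
# yields an array of shape (4, 32).
#
#   emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
#   assert emb.shape == (4, 32)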
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def __getstate__( self : List[Any] ):
_a : List[str] = self.__dict__.copy()
_a : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_UpperCAmelCase : Dict ):
_a : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Optional[Any] = {}
_a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ):
return self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : Any ):
return self.sp_model.piece_to_id(_UpperCAmelCase )
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Tuple ):
_a : Tuple = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
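# A minimal usage sketch (illustration only, not part of the module): load the
# pretrained sentencepiece vocab listed above and round-trip a sentence. This
# assumes network access to fetch the checkpoint from the Hub.
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    encoded = tokenizer("Paris is the capital of France.")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))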
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
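# Usage sketch (assumption-based illustration, not part of the suite): how a
# patcher for a hypothetical "my_metric" would stub out its expensive model
# call while its doctests run. All names below are made up for illustration.
#
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric_module.expensive_forward") as mock_forward:
#         mock_forward.return_value = 0.5
#         yield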
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
def __call__( self , A_ , A_=1_6000 , A_ = 512 , A_ = 512 , A_ = 50 , A_ = 7.5 , A_ = None , A_ = 1 , A_ = 0.0 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , **A_ , ):
'''simple docstring'''
UpperCamelCase : Dict = self.speech_processor.feature_extractor(
A_ , return_tensors="pt" , sampling_rate=A_ ).input_features.to(self.device )
UpperCamelCase : Union[str, Any] = self.speech_model.generate(A_ , max_length=48_0000 )
UpperCamelCase : List[str] = self.speech_processor.tokenizer.batch_decode(A_ , skip_special_tokens=A_ , normalize=A_ )[
0
]
if isinstance(A_ , A_ ):
UpperCamelCase : List[Any] = 1
elif isinstance(A_ , A_ ):
UpperCamelCase : Tuple = len(A_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(A_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(A_ )}.""" )
# get prompt text embeddings
UpperCamelCase : str = self.tokenizer(
A_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCamelCase : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : str = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = text_embeddings.shape
UpperCamelCase : Any = text_embeddings.repeat(1 , A_ , 1 )
UpperCamelCase : str = text_embeddings.view(bs_embed * num_images_per_prompt , A_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : Dict = [""] * batch_size
elif type(A_ ) is not type(A_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !="""
F""" {type(A_ )}.""" )
elif isinstance(A_ , A_ ):
UpperCamelCase : Any = [negative_prompt]
elif batch_size != len(A_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
UpperCamelCase : int = negative_prompt
UpperCamelCase : str = text_input_ids.shape[-1]
UpperCamelCase : Union[str, Any] = self.tokenizer(
A_ , padding="max_length" , max_length=A_ , truncation=A_ , return_tensors="pt" , )
UpperCamelCase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : Tuple = uncond_embeddings.shape[1]
UpperCamelCase : List[str] = uncond_embeddings.repeat(1 , A_ , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Optional[Any] = torch.randn(A_ , generator=A_ , device="cpu" , dtype=A_ ).to(
self.device )
else:
UpperCamelCase : Dict = torch.randn(A_ , generator=A_ , device=self.device , dtype=A_ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : Optional[Any] = {}
if accepts_eta:
UpperCamelCase : Tuple = eta
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : int = self.scheduler.scale_model_input(A_ , A_ )
# predict the noise residual
UpperCamelCase : str = self.unet(A_ , A_ , encoder_hidden_states=A_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : List[Any] = noise_pred.chunk(2 )
UpperCamelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : List[str] = self.scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase : List[str] = 1 / 0.1_82_15 * latents
UpperCamelCase : List[str] = self.vae.decode(A_ ).sample
UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(A_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=A_ , nsfw_content_detected=A_ )
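# Usage sketch, under assumptions: this community pipeline is normally assembled
# from a Whisper checkpoint plus Stable Diffusion components. The checkpoint
# names below are illustrative choices, not requirements.
#
# from transformers import WhisperForConditionalGeneration, WhisperProcessor
# from diffusers import DiffusionPipeline
#
# speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",
#     speech_model=speech_model,
#     speech_processor=speech_processor,
# )
# image = pipe(audio, sampling_rate=16_000).images[0]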
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
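# A small standalone illustration (assumption-based) of the scheduler API the
# tests above exercise: build the scheduler from the same config, set the
# inference timesteps, and inspect the resulting discretization.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(10)
    print(scheduler.timesteps)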
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
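# Illustrative downstream usage of the lazily-exported symbols above (the
# checkpoint name is one public example, not the only option):
#
# from transformers import Blip2Processor, Blip2ForConditionalGeneration
#
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")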
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
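# Example invocation (hypothetical file paths; the script filename depends on
# where this module lives in the repo):
#
# python convert_clap_checkpoint.py \
#     --checkpoint_path /path/to/clap_checkpoint.pt \
#     --pytorch_dump_folder_path ./clap-converted \
#     --enable_fusion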
def factorial(num: int) -> int:
    """Compute num! iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (default: 100!)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
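# Worked example (grounded in the functions above): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) returns 27. The default
# solution(100) answers the digit sum of 100!.
if __name__ == "__main__":
    assert solution(10) == 27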
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
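# A usage sketch (assumption-based): with `apply_ocr=True` (the LayoutLMv2
# image processor default) the processor runs OCR itself, so only images are
# passed in; words and boxes come back from the OCR step.
#
# from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizer
#
# image_processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
# tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
# processor = LayoutXLMProcessor(image_processor, tokenizer)
# encoding = processor(images=image, return_tensors="pt")  # `image`: a PIL.Image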
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(6)
    '0b110'
    >>> decimal_to_binary(-6)
    '-0b110'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
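# Example invocation (hypothetical script filename and paths; the DALL-E
# encoder URL is the usual source for `--checkpoint_path`):
#
# python convert_dalle_to_flava_codebook.py \
#     --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#     --pytorch_dump_folder_path ./flava-codebook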
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
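# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal way this unconditional latent-diffusion pipeline could be driven;
# the class and checkpoint names below are assumptions for illustration only.
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # assumed names
#   images = pipe(batch_size=1, num_inference_steps=50).images      # list of PIL images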
| 31
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
_a = 42
_a = None
@staticmethod
def A__ ( ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError
def A__ ( self , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError
def A__ ( self ) -> Dict:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def A__ ( cls ) -> List[str]:
'''simple docstring'''
return F'''`pip install {cls.pip_package or cls.name}`'''
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = 'optuna'
@staticmethod
def A__ ( ) -> str:
'''simple docstring'''
return is_optuna_available()
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return run_hp_search_optuna(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
return default_hp_space_optuna(lowerCAmelCase )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = 'ray'
_a = '\'ray[tune]\''
@staticmethod
def A__ ( ) -> List[Any]:
'''simple docstring'''
return is_ray_available()
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> Tuple:
'''simple docstring'''
return run_hp_search_ray(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return default_hp_space_ray(lowerCAmelCase )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = 'sigopt'
@staticmethod
def A__ ( ) -> Any:
'''simple docstring'''
return is_sigopt_available()
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> Any:
'''simple docstring'''
return run_hp_search_sigopt(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_sigopt(lowerCAmelCase )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = 'wandb'
@staticmethod
def A__ ( ) -> str:
'''simple docstring'''
return is_wandb_available()
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return run_hp_search_wandb(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
return default_hp_space_wandb(lowerCAmelCase )
lowercase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a ( ) -> List[str]:
"""simple docstring"""
_lowercase =[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__A ) > 0:
_lowercase =available_backends[0].name
if len(__A ) > 1:
logger.info(
F'''{len(__A )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
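# --- Editor's illustrative sketch (not part of the original file) ---
# How the registry above is typically consumed by a caller:
#   name = default_hp_search_backend()  # first installed backend, or RuntimeError
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]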
| 205
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u, p):
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main():
    """simple docstring"""
    n = int(input('''enter the numbers of values: '''))
    y = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('''enter the values of parameters in a list: ''')
    x = list(map(int, input().split()))
    print('''enter the values of corresponding parameters: ''')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('''enter the value to interpolate: '''))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'''the value at {value} is {summ}''')
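# --- Editor's illustrative addition: a non-interactive worked example ---
# Same forward-difference scheme as main(), over made-up samples of 2**x;
# relies only on ucal() and math from this file.
def _demo_newton_forward():
    x = [0, 1, 2, 3]
    y = [[1.0, 0.0, 0.0, 0.0], [2.0, 0.0, 0.0, 0.0], [4.0, 0.0, 0.0, 0.0], [8.0, 0.0, 0.0, 0.0]]
    n = len(x)
    value = 1.5
    u = (value - x[0]) / (x[1] - x[0])
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ  # 2.8125 here, close to the true 2**1.5 ~= 2.8284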
if __name__ == "__main__":
main()
| 211
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = RobertaTokenizer
def __init__( self : List[str] , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[str]="replace" , _UpperCAmelCase : Tuple="<s>" , _UpperCAmelCase : Union[str, Any]="</s>" , _UpperCAmelCase : List[str]="</s>" , _UpperCAmelCase : Union[str, Any]="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : Dict="<pad>" , _UpperCAmelCase : List[str]="<mask>" , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = getattr(_UpperCAmelCase , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**_UpperCAmelCase )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = "post_processor"
UpperCAmelCase_ = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase_ = tuple(state["cls"] )
UpperCAmelCase_ = False
if state.get("add_prefix_space" , _UpperCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = True
if state.get("trim_offsets" , _UpperCAmelCase ) != trim_offsets:
UpperCAmelCase_ = trim_offsets
UpperCAmelCase_ = True
if changes_to_apply:
UpperCAmelCase_ = getattr(_UpperCAmelCase , state.pop("type" ) )
UpperCAmelCase_ = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
def lowercase__ ( self : int ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : Tuple , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
UpperCAmelCase_ = value
def lowercase__ ( self : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase_ = kwargs.get("is_split_into_words" , _UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , *_UpperCAmelCase : Any , **_UpperCAmelCase : Optional[Any] ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase_ = kwargs.get("is_split_into_words" , _UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase_ = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def lowercase__ ( self : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any]=None ) -> int:
'''simple docstring'''
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
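# --- Editor's illustrative sketch (not part of the original file) ---
# Typical use of this fast tokenizer (the upstream class name is assumed):
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("Hello world", return_tensors="pt")  # input_ids + attention_mask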
| 241
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the assumption that you run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase = """src/diffusers"""
lowerCamelCase = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase = spec.loader.load_module()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowerCAmelCase__ ) is not None
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = object_name.split("." )
UpperCAmelCase_ = 0
# First let's find the module where our object lives.
UpperCAmelCase_ = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , f"""{module}.py""" ) ):
i += 1
if i < len(lowerCAmelCase__ ):
UpperCAmelCase_ = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(lowerCAmelCase__ , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ = f.readlines()
# Now let's find the class / func in the code!
UpperCAmelCase_ = ""
UpperCAmelCase_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCAmelCase_ = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_ = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
lowerCamelCase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowerCamelCase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowerCamelCase = re.compile(r"""<FILL\s+[^>]*>""")
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = code.split("\n" )
UpperCAmelCase_ = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
UpperCAmelCase_ = f"""class Bla:\n{code}"""
UpperCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ )
UpperCAmelCase_ = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = style_docstrings_in_code(lowerCAmelCase__ )
return result[len("class Bla:\n" ) :] if has_indent else result
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
with open(lowerCAmelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
# Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
UpperCAmelCase_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = search.groups()
UpperCAmelCase_ = find_code_in_diffusers(lowerCAmelCase__ )
UpperCAmelCase_ = get_indent(lowerCAmelCase__ )
UpperCAmelCase_ = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCAmelCase_ = theoretical_indent
UpperCAmelCase_ = start_index
# Loop to check the observed code; stop when the indentation diminishes or when we see an `# End copy` comment.
UpperCAmelCase_ = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(f"""^{indent}# End copy""" , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_ = lines[start_index:line_index]
UpperCAmelCase_ = "".join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCAmelCase_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowerCAmelCase__ ) is None]
UpperCAmelCase_ = "\n".join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = replace_pattern.replace("with" , "" ).split("," )
UpperCAmelCase_ = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = pattern.groups()
UpperCAmelCase_ = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
UpperCAmelCase_ = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
UpperCAmelCase_ = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCAmelCase_ = blackify(lines[start_index - 1] + theoretical_code )
UpperCAmelCase_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCAmelCase_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCAmelCase_ = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def a__ ( lowerCAmelCase__ = False ):
UpperCAmelCase_ = glob.glob(os.path.join(lowerCAmelCase__ , "**/*.py" ) , recursive=lowerCAmelCase__ )
UpperCAmelCase_ = []
for filename in all_files:
UpperCAmelCase_ = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = "\n".join(lowerCAmelCase__ )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCamelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
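# --- Editor's illustrative invocation (run from the repo root, see the path constants above) ---
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite offending files in place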
| 241
| 1
|
import requests
lowerCAmelCase__ : Any ='''''' # <-- Put your OpenWeatherMap appid here!
lowerCAmelCase__ : Tuple ='''https://api.openweathermap.org/data/2.5/'''
def __lowercase ( a__ = "Chicago" , a__ = APPID ) -> dict:
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def __lowercase ( a__ = "Kolkata, India" , a__ = APPID ) -> dict:
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def __lowercase ( a__ = 55.68 , a__ = 12.57 , a__ = APPID ) -> dict:
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCAmelCase__ : List[Any] =input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 257
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(F'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F'''Done. Image saved to disk as {file_name}.''')
| 257
| 1
|
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class a__ :
_SCREAMING_SNAKE_CASE : Tuple = None
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowercase : Optional[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Optional[int] = os.path.join(_UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_UpperCamelCase )
_lowercase : List[Any] = self.feature_extraction_class.from_json_file(_UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : str = feat_extract_first.save_pretrained(_UpperCamelCase )[0]
check_json_file_has_correct_format(_UpperCamelCase )
_lowercase : Any = self.feature_extraction_class.from_pretrained(_UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.feature_extraction_class()
self.assertIsNotNone(_UpperCamelCase )
| 199
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = IFImgaImgSuperResolutionPipeline
_SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
_SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
_SCREAMING_SNAKE_CASE : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ):
"""simple docstring"""
if str(_UpperCamelCase ).startswith("mps" ):
_lowercase : Tuple = torch.manual_seed(_UpperCamelCase )
else:
_lowercase : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
_lowercase : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
_lowercase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 199
| 1
|
import comet # From: unbabel-comet
import torch
import datasets
_lowerCAmelCase : Union[str, Any] = datasets.logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_lowerCAmelCase : Tuple = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_lowerCAmelCase : Any = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[str] ):
'''simple docstring'''
if self.config_name == "default":
A_ : Optional[Any] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
A_ : Any = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def SCREAMING_SNAKE_CASE ( self , sources , predictions , references , gpus=None , progress_bar=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 300
|
import numpy as np
def a_ ( vector : np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
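# Editor's example: a_(np.array([0.0, 1.0])) -> array([0.5, 0.73105858])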
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[Any] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class UpperCamelCase_ ( a_ ):
_A : str = 'lxmert'
_A : Dict = {}
def __init__( self , snake_case__=3_05_22 , snake_case__=7_68 , snake_case__=12 , snake_case__=95_00 , snake_case__=16_00 , snake_case__=4_00 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=5_12 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=9 , snake_case__=5 , snake_case__=5 , snake_case__=20_48 , snake_case__=4 , snake_case__=6.67 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , **snake_case__ , ) -> int:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
super().__init__(**snake_case__ )
| 350
|
"""simple docstring"""
def solution():
    '''simple docstring'''
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
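# Editor's note: this is Project Euler problem 9: find the Pythagorean triple
# (a, b, c) with a + b + c = 1000 and return the product a*b*c; the known
# answer is 31875000 (a=200, b=375, c=425).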
if __name__ == "__main__":
print(F'{solution() = }')
| 248
| 0
|
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
A: List[str] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
A: str = "EGZWVONAHDCLFQMSIPJBYUKXTR"
A: List[str] = "FOBHMDKEXQNRAULPGSJVTYICZW"
A: Any = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
A: Dict = "RMDJXFUWGISLHVTCQNKYPBEZOA"
A: Optional[int] = "SGLCPQWZHKXAREONTFBVIYJUDM"
A: Optional[int] = "HVSICLTYKQUBXDWAJZOMFGPREN"
A: List[str] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
A: Optional[int] = "LFKIJODBEGAMQPXVUHYSTCZRWN"
A: Optional[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _snake_case ( UpperCamelCase : RotorPositionT , UpperCamelCase : RotorSelectionT , UpperCamelCase : str ):
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(UpperCamelCase ) )) < 3:
UpperCAmelCase : Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(UpperCamelCase )
# Checks if rotor positions are valid
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = rotpos
if not 0 < rotorposa <= len(UpperCamelCase ):
UpperCAmelCase : Optional[Any] = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(UpperCamelCase )
if not 0 < rotorposa <= len(UpperCamelCase ):
UpperCAmelCase : List[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(UpperCamelCase )
if not 0 < rotorposa <= len(UpperCamelCase ):
UpperCAmelCase : List[Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(UpperCamelCase )
# Validates string and returns dict
UpperCAmelCase : Optional[Any] = _plugboard(UpperCamelCase )
return rotpos, rotsel, pbdict
def _plugboard ( pbstring : str ):
    # Tests whether the input string
    # a) is of type string and
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring , str ):
        msg = F"Plugboard setting isn't type string ({type(pbstring )})"
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = F"Odd number of symbols ({len(pbstring )})"
        raise Exception(msg )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(""" """ , """""" )  # str.replace is not in-place; re-assign the result
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = F"'{i}' not in list of symbols"
            raise Exception(msg )
        elif i in tmppbl:
            msg = F"Duplicate symbol ({i})"
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
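# Editor's example: the validated plugboard string becomes a symmetric mapping,
#   _plugboard("ABCD") -> {"A": "B", "B": "A", "C": "D", "D": "C"}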
def _snake_case ( UpperCamelCase : str , UpperCamelCase : RotorPositionT , UpperCamelCase : RotorSelectionT = (rotora, rotora, rotora) , UpperCamelCase : str = "" , ):
UpperCAmelCase : Optional[int] = text.upper()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = _validator(
UpperCamelCase , UpperCamelCase , plugb.upper() )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = rotor_position
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
UpperCAmelCase : int = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
UpperCAmelCase : Optional[int] = plugboard[symbol]
# rotor ra --------------------------
UpperCAmelCase : List[str] = abc.index(UpperCamelCase ) + rotorposa
UpperCAmelCase : Tuple = rotora[index % len(UpperCamelCase )]
# rotor rb --------------------------
UpperCAmelCase : List[Any] = abc.index(UpperCamelCase ) + rotorposa
UpperCAmelCase : Optional[int] = rotora[index % len(UpperCamelCase )]
# rotor rc --------------------------
UpperCAmelCase : Dict = abc.index(UpperCamelCase ) + rotorposa
UpperCAmelCase : Union[str, Any] = rotora[index % len(UpperCamelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
UpperCAmelCase : Union[str, Any] = reflector[symbol]
# 2nd rotors
UpperCAmelCase : str = abc[rotora.index(UpperCamelCase ) - rotorposa]
UpperCAmelCase : int = abc[rotora.index(UpperCamelCase ) - rotorposa]
UpperCAmelCase : Optional[Any] = abc[rotora.index(UpperCamelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
UpperCAmelCase : Any = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(UpperCamelCase ):
UpperCAmelCase : Dict = 0
rotorposa += 1
if rotorposa >= len(UpperCamelCase ):
UpperCAmelCase : Tuple = 0
rotorposa += 1
if rotorposa >= len(UpperCamelCase ):
UpperCAmelCase : List[str] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(UpperCamelCase )
return "".join(UpperCamelCase )
if __name__ == "__main__":
A: Dict = "This is my Python script that emulates the Enigma machine from WWII."
A: Union[str, Any] = (1, 1, 1)
A: int = "pictures"
A: List[Any] = (rotora, rotora, rotora)
A: Any = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 109
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: List[str] = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
A: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
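# Editor's note (illustrative): with the _LazyModule pattern above, the
# torch-dependent classes are only imported on first attribute access,
# e.g. (module path assumed):
#   from transformers.models.clipseg import CLIPSegProcessor  # cheap, no torch
#   from transformers.models.clipseg import CLIPSegModel      # pulls in torch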
| 109
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self , _A , _A=3 , _A=3_2 , _A=3 , _A=1_0 , _A=[1_0, 2_0, 3_0, 4_0] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(lowerCAmelCase__ )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = FlaxRegNetModel(config=lowerCAmelCase__ )
__lowerCAmelCase = model(lowerCAmelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase__ )
__lowerCAmelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class a__ ( a__ , unittest.TestCase ):
_a : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_a : List[Any] = False
_a : Tuple = False
_a : Any = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = FlaxRegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase__ )
__lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
def check_hidden_states_output(_A , _A , _A ):
__lowerCAmelCase = model_class(lowerCAmelCase__ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
__lowerCAmelCase = model_class(lowerCAmelCase__ )
@jax.jit
def model_jitted(_A , **_A ):
return model(pixel_values=lowerCAmelCase__ , **lowerCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowerCAmelCase = model_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowerCAmelCase = model_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class a__ ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase__ , return_tensors="np" )
__lowerCAmelCase = model(**lowerCAmelCase__ )
# verify the logits
__lowerCAmelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__lowerCAmelCase = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 363
|
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase__ = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
UpperCamelCase__ = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""
UpperCamelCase__ = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def __SCREAMING_SNAKE_CASE( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 102
| 0
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
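# --- Editor's illustrative invocation (script filename assumed) ---
# python-fire maps CLI flags onto the function arguments above:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en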
| 97
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''nielsr/canine-s''': 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe0_00
SEP = 0xe0_01
BOS = 0xe0_02
MASK = 0xe0_03
RESERVED = 0xe0_04
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A ( __UpperCAmelCase ):
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, bos_token=chr(CLS ), eos_token=chr(SEP ), sep_token=chr(SEP ), cls_token=chr(CLS ), pad_token=chr(PAD ), mask_token=chr(MASK ), add_prefix_space=False, model_max_length=2048, **kwargs, ):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self._unicode_vocab_size
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
return list(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
try:
return ord(UpperCamelCase__ )
except TypeError:
raise ValueError(f"invalid token: '{token}'" )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase__ )
except TypeError:
raise ValueError(f"invalid id: {index}" )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
return "".join(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase__ )) + [1]
return result
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
"""simple docstring"""
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
"""simple docstring"""
return ()
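
# A minimal usage sketch (assuming this class mirrors transformers' CanineTokenizer
# and that CLS/SEP above are the special codepoint constants defined earlier):
#
#   tokenizer = CanineTokenizer()
#   ids = tokenizer("hello")["input_ids"]  # [CLS] + one codepoint per character + [SEP]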
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
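

# Quick sanity check (exact for these inputs):
#   >>> sigmoid(np.array([0.0]))
#   array([0.5])
#   >>> swish(np.array([0.0]))
#   array([0.])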
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
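

# Each helper above returns an "operation" tuple (callable, *args); the parametrized
# test below replays the same operations against both HashMap and a plain dict and
# compares results (or raised exceptions) step by step.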
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if the regexes in qs match any contiguous window of strings in ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # slide a window of len(qs) over ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
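

# Illustrative example: _match(("mlp", "c_fc", "kernel"),
# ("transformer", "h", "0", "mlp", "c_fc", "kernel")) is True, because the three
# patterns fully match a contiguous window of the key tuple.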
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
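

# Minimal usage sketch (hypothetical parameter tree; real trees come from a Flax model):
#
#   params = {"transformer": {"wte": {"embedding": embedding_weights}}}
#   specs = set_partitions(params)
#   # `specs` is a FrozenDict mapping each leaf to its PartitionSpec (or None)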
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
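    # Example invocations (hypothetical script name, host and key path):
    #   python run_on_remote.py --instance V100:1 --provider cheapest pytorch/text-generation/run_generation.py
    #   python run_on_remote.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa <example> <args>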
    parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
UpperCamelCase__ = ["bert-base-uncased", "bert-base-cased"]
UpperCamelCase__ = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
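    # Example invocation (hypothetical script name and output path):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation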
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def __A ( self : List[Any] ) -> int:
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def __A ( self : Any ) -> List[Any]:
pass
def __A ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first come first served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
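# A minimal usage sketch of the extractor registry above (paths are
# illustrative, not part of the original module):
#
#   archive_path = "/tmp/data.tar.gz"
#   fmt = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
#   if fmt:
#       Extractor.extract(archive_path, "/tmp/data_extracted", extractor_format=fmt)
#
# `infer_extractor_format` reads at most `_get_magic_number_max_length()` bytes
# from the file once and matches them against every registered extractor's
# magic numbers, so detection does not depend on the file extension.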
| 31
| 0
|
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
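# A minimal sketch of how a pin table like `deps` is typically consumed when
# building packaging metadata (the `deps_list` helper is hypothetical, not
# part of the original file):
#
#   def deps_list(*pkgs):
#       return [deps[pkg] for pkg in pkgs]
#
#   install_requires = deps_list("numpy", "regex", "requests", "Pillow")
#   # -> ['numpy', 'regex!=2019.12.17', 'requests', 'Pillow']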
| 356
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps', default=-1, type=int, help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ), )
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info('  %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
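# A hypothetical invocation of this fine-tuning script (the file name and data
# paths are illustrative):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset data/cloze_test_val__spring2016.csv \
#       --eval_dataset data/cloze_test_test__spring2016.csv \
#       --output_dir out/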
| 288
| 0
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 241
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """The Chudnovsky algorithm: a fast method for calculating the digits of pi."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""")
| 241
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
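# A minimal usage sketch (the overridden values are illustrative, not defaults
# taken from any released checkpoint):
#
#   config = VivitConfig(num_frames=16, hidden_size=512)
#   config.num_frames  # -> 16
#   config.model_type  # -> "vivit"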
| 218
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
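# A minimal usage sketch (the checkpoint id and expected shape are illustrative):
#
#   from PIL import Image
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # e.g. torch.Size([1, 3, 224, 224])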
| 218
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 90
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
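# A minimal usage sketch with a synthetic image (shapes follow from the
# defaults above: resize the shortest edge to 256, then center crop to 224x224):
#
#   import numpy as np
#   processor = LevitImageProcessor()
#   dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(dummy, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)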
| 246
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)[\"depth\"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline(\"depth-estimation\")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to(\"cuda\")\n\n\n    >>> img = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/cat.png\"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n    >>> prompt = \"A robot, 4k photo\"\n    >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n    >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save(\"robot_cat.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
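# A quick worked example: with the default scale_factor of 8, a requested
# 512x512 image maps to a 64x64 latent grid (512 // 8**2 = 8, then 8 * 8 = 64),
# and sizes that are not divisible by 64 are rounded up:
#
#   downscale_height_and_width(512, 512)  # -> (64, 64)
#   downscale_height_and_width(500, 500)  # -> (64, 64)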
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with ControlNet conditioning (Kandinsky 2.2)."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(F'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(F'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, hint, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 360
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A class replicating the `BertConfig` with additional pruning parameters."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
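# A minimal usage sketch highlighting the pruning-specific fields this config
# adds on top of the standard BERT hyperparameters (values are illustrative):
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   config.pruning_method  # -> "topK"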
| 134
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
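# A sketch of how these hooks play out in practice (commands are illustrative):
# every test not explicitly marked `integration` or `unit` receives the `unit`
# marker from `pytest_collection_modifyitems`, so the suites can be selected
# separately on the command line:
#
#   pytest -m unit         # only unit tests
#   pytest -m integration  # only integration tests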
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 313
| 1
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=64 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Any:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
lowercase_ = vocab_size - 1
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = self.prepare_config_and_inputs()
lowercase_ = True
return config, input_ids, input_mask, token_labels
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = GPTNeoXModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = True
lowercase_ = GPTNeoXModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = GPTNeoXForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.num_labels
lowercase_ = GPTNeoXForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.num_labels
lowercase_ = GPTNeoXForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.num_labels
lowercase_ = GPTNeoXForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = True
lowercase_ = GPTNeoXForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase )
lowercase_ = output_from_no_past["hidden_states"][0]
lowercase_ = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )["hidden_states"][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = GPTNeoXModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=64 , num_attention_heads=8 )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ = None
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 10] , config.vocab_size )
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = GPTNeoXModel(UpperCAmelCase )
original_model.to(UpperCAmelCase )
original_model.eval()
lowercase_ = original_model(UpperCAmelCase ).last_hidden_state
lowercase_ = original_model(UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {"type": scaling_type, "factor": 10.0}
lowercase_ = GPTNeoXModel(UpperCAmelCase )
scaled_model.to(UpperCAmelCase )
scaled_model.eval()
lowercase_ = scaled_model(UpperCAmelCase ).last_hidden_state
lowercase_ = scaled_model(UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
lowercase_ = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(UpperCAmelCase )
lowercase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowercase_ = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
lowercase_ = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=20 )
lowercase_ = tokenizer.batch_decode(UpperCAmelCase )[0]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 353
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
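# Illustrative note (assumption: this tokenizer follows the usual BART-style
# convention, consistent with the two methods above):
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s></s> B </s>
# and the token-type method returns all zeros in both cases.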
| 297
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case : Tuple ={
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] =[
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__snake_case : Optional[int] =[
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__snake_case : Optional[int] =[
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__snake_case : Union[str, Any] =[
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__snake_case : Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =(DPMSolverSDEScheduler,)
snake_case_ =10
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__lowerCamelCase )
return config
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase ,beta_end=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Dict = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : List[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : str = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : List[Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = output.prev_sample
lowerCAmelCase__ : List[str] = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Union[str, Any] = scheduler_class(**__lowerCamelCase ,use_karras_sigmas=__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase )
for t in scheduler.timesteps:
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : str = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Tuple = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : str = output.prev_sample
lowerCAmelCase__ : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : List[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 129
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : List[Any] = logging.getLogger(__name__)
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
# save the model, removing any stale config/weights first
if os.path.exists(__snake_case ):
if os.path.exists(os.path.join(__snake_case ,"config.json" ) ) and os.path.isfile(
os.path.join(__snake_case ,"config.json" ) ):
os.remove(os.path.join(__snake_case ,"config.json" ) )
if os.path.exists(os.path.join(__snake_case ,"pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(__snake_case ,"pytorch_model.bin" ) ):
os.remove(os.path.join(__snake_case ,"pytorch_model.bin" ) )
else:
os.makedirs(__snake_case )
model.save_pretrained(__snake_case )
def _lowercase ( __snake_case ,__snake_case=False ) -> Optional[Any]:
__lowerCAmelCase : int = 2
if unlogit:
__lowerCAmelCase : int = torch.pow(__snake_case ,__snake_case )
__lowerCAmelCase : List[Any] = p * torch.log(__snake_case )
__lowerCAmelCase : int = 0
return -plogp.sum(dim=-1 )
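# Sanity-check sketch (torch.log is the natural log, so entropies are in
# nats): for a uniform distribution p = [0.5, 0.5] the helper above returns
# -(0.5 * ln 0.5 + 0.5 * ln 0.5) = ln 2 ≈ 0.693.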
def _lowercase ( __snake_case ) -> List[Any]:
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(__snake_case ) ) ) )
for row in range(len(__snake_case ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case=True ,__snake_case=True ,__snake_case=None ,__snake_case=False ) -> int:
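# Accumulates, per (layer, head), the attention entropy and a gradient-based
# importance score over the dataloader; returns
# (attn_entropy, head_importance, total_loss).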
__lowerCAmelCase , __lowerCAmelCase : Tuple = model.config.num_hidden_layers, model.config.num_attention_heads
__lowerCAmelCase : List[Any] = torch.zeros(__snake_case ,__snake_case ).to(args.device )
__lowerCAmelCase : List[Any] = torch.zeros(__snake_case ,__snake_case ).to(args.device )
if head_mask is None:
__lowerCAmelCase : Dict = torch.ones(__snake_case ,__snake_case ).to(args.device )
head_mask.requires_grad_(requires_grad=__snake_case )
# If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : str = 0.0
__lowerCAmelCase : Tuple = 0.0
for step, inputs in enumerate(tqdm(__snake_case ,desc="Iteration" ,disable=args.local_rank not in [-1, 0] ) ):
__lowerCAmelCase : List[Any] = tuple(t.to(args.device ) for t in inputs )
((__lowerCAmelCase) , ) : Union[str, Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__lowerCAmelCase : Any = model(__snake_case ,labels=__snake_case ,head_mask=__snake_case )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__snake_case ):
__lowerCAmelCase : Any = entropy(attn.detach() ,__snake_case )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__snake_case ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__lowerCAmelCase : int = 2
__lowerCAmelCase : Optional[Any] = torch.pow(torch.pow(__snake_case ,__snake_case ).sum(-1 ) ,1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
__lowerCAmelCase : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(__snake_case )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(__snake_case )
logger.info("Head ranked by importance scores" )
__lowerCAmelCase : str = torch.zeros(head_importance.numel() ,dtype=torch.long ,device=args.device )
__lowerCAmelCase : Dict = torch.arange(
head_importance.numel() ,device=args.device )
__lowerCAmelCase : Dict = head_ranks.view_as(__snake_case )
print_ad_tensor(__snake_case )
return attn_entropy, head_importance, total_loss
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Tuple:
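# Iteratively zeroes the least important heads until the score falls below
# masking_threshold * original score; returns the final binary head mask.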
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = compute_heads_importance(__snake_case ,__snake_case ,__snake_case ,compute_entropy=__snake_case )
__lowerCAmelCase : Any = 1 / loss # use the inverse LM loss instead of a downstream score
logger.info("Pruning: original score: %f, threshold: %f" ,__snake_case ,original_score * args.masking_threshold )
__lowerCAmelCase : List[Any] = torch.ones_like(__snake_case )
__lowerCAmelCase : Optional[int] = max(1 ,int(new_head_mask.numel() * args.masking_amount ) )
__lowerCAmelCase : str = original_score
while current_score >= original_score * args.masking_threshold:
__lowerCAmelCase : int = new_head_mask.clone().detach() # save current head mask
# rank heads from least to most important; keep only heads that are not yet masked
__lowerCAmelCase : List[Any] = float("Inf" )
__lowerCAmelCase : Tuple = head_importance.view(-1 ).sort()[1]
if len(__snake_case ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
__lowerCAmelCase : str = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" ,str(current_heads_to_mask.tolist() ) )
__lowerCAmelCase : Dict = new_head_mask.view(-1 )
__lowerCAmelCase : List[Any] = 0.0
__lowerCAmelCase : Optional[int] = new_head_mask.view_as(__snake_case )
__lowerCAmelCase : int = new_head_mask.clone().detach()
print_ad_tensor(__snake_case )
# Compute metric and head importance again
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = compute_heads_importance(
__snake_case ,__snake_case ,__snake_case ,compute_entropy=__snake_case ,head_mask=__snake_case )
__lowerCAmelCase : List[str] = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" ,__snake_case ,new_head_mask.sum() ,new_head_mask.sum() / new_head_mask.numel() * 100 ,)
logger.info("Final head mask" )
print_ad_tensor(__snake_case )
np.save(os.path.join(args.output_dir ,"head_mask.npy" ) ,head_mask.detach().cpu().numpy() )
return head_mask
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[int]:
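# Physically removes the heads zeroed out in head_mask, re-measures score and
# speed against the masked-but-unpruned baseline, and saves the pruned model.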
__lowerCAmelCase : Optional[Any] = datetime.now()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = compute_heads_importance(
__snake_case ,__snake_case ,__snake_case ,compute_entropy=__snake_case ,compute_importance=__snake_case ,head_mask=__snake_case )
__lowerCAmelCase : List[Any] = 1 / loss
__lowerCAmelCase : List[str] = datetime.now() - before_time
__lowerCAmelCase : Dict = sum(p.numel() for p in model.parameters() )
__lowerCAmelCase : Any = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__snake_case ) )
}
for k, v in heads_to_prune.items():
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = [
v,
]
assert sum(len(__snake_case ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__snake_case )
__lowerCAmelCase : Any = sum(p.numel() for p in model.parameters() )
__lowerCAmelCase : str = datetime.now()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = compute_heads_importance(
__snake_case ,__snake_case ,__snake_case ,compute_entropy=__snake_case ,compute_importance=__snake_case ,head_mask=__snake_case ,actually_pruned=__snake_case ,)
__lowerCAmelCase : Tuple = 1 / loss
__lowerCAmelCase : Dict = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" ,__snake_case ,__snake_case ,pruned_num_params / original_num_params * 100 ,)
logger.info("Pruning: score with masking: %f score with pruning: %f" ,__snake_case ,__snake_case )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" ,original_time / new_time * 100 )
save_model(__snake_case ,args.output_dir )
def _lowercase ( ) -> Optional[Any]:
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" ,default=__snake_case ,type=__snake_case ,required=__snake_case ,help="The input data dir. Should contain the .tsv files (or other data files) for the task." ,)
parser.add_argument(
"--model_name_or_path" ,default=__snake_case ,type=__snake_case ,required=__snake_case ,help="Path to pretrained model or model identifier from huggingface.co/models" ,)
parser.add_argument(
"--output_dir" ,default=__snake_case ,type=__snake_case ,required=__snake_case ,help="The output directory where the model predictions and checkpoints will be written." ,)
# Other parameters
parser.add_argument(
"--config_name" ,default="" ,type=__snake_case ,help="Pretrained config name or path if not the same as model_name_or_path" ,)
parser.add_argument(
"--tokenizer_name" ,default="" ,type=__snake_case ,help="Pretrained tokenizer name or path if not the same as model_name_or_path" ,)
parser.add_argument(
"--cache_dir" ,default=__snake_case ,type=__snake_case ,help="Where do you want to store the pre-trained models downloaded from s3" ,)
parser.add_argument(
"--data_subset" ,type=__snake_case ,default=-1 ,help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" ,action="store_true" ,help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" ,action="store_true" ,help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" ,action="store_true" ,help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" ,action="store_true" ,help="Don't normalize all importance scores between 0 and 1" ,)
parser.add_argument(
"--try_masking" ,action="store_true" ,help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" ,default=0.9 ,type=__snake_case ,help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." ,)
parser.add_argument(
"--masking_amount" ,default=0.1 ,type=__snake_case ,help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" ,default="acc" ,type=__snake_case ,help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" ,default=128 ,type=__snake_case ,help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) ,)
parser.add_argument("--batch_size" ,default=1 ,type=__snake_case ,help="Batch size." )
parser.add_argument("--seed" ,type=__snake_case ,default=42 )
parser.add_argument("--local_rank" ,type=__snake_case ,default=-1 ,help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" ,action="store_true" ,help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" ,type=__snake_case ,default="" ,help="Can be used for distant debugging." )
parser.add_argument("--server_port" ,type=__snake_case ,default="" ,help="Can be used for distant debugging." )
__lowerCAmelCase : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=__snake_case )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__lowerCAmelCase : Dict = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
__lowerCAmelCase : List[str] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__lowerCAmelCase : List[Any] = torch.device("cuda" ,args.local_rank )
__lowerCAmelCase : Union[str, Any] = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device ,args.n_gpu ,bool(args.local_rank != -1 ) ) )
__lowerCAmelCase : Dict = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__lowerCAmelCase : str = nn.parallel.DistributedDataParallel(
__snake_case ,device_ids=[args.local_rank] ,output_device=args.local_rank ,find_unused_parameters=__snake_case )
elif args.n_gpu > 1:
__lowerCAmelCase : Tuple = nn.DataParallel(__snake_case )
# Print/save training arguments
os.makedirs(args.output_dir ,exist_ok=__snake_case )
torch.save(__snake_case ,os.path.join(args.output_dir ,"run_args.bin" ) )
logger.info("Training/evaluation parameters %s" ,__snake_case )
# Prepare dataset
__lowerCAmelCase : List[Any] = np.concatenate(
[
np.loadtxt(args.data_dir ,dtype=np.intaa ),
] )
__lowerCAmelCase : Dict = (torch.from_numpy(__snake_case ),)
__lowerCAmelCase : Optional[Any] = TensorDataset(*__snake_case )
__lowerCAmelCase : List[str] = RandomSampler(__snake_case )
__lowerCAmelCase : List[str] = DataLoader(__snake_case ,sampler=__snake_case ,batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__snake_case ,__snake_case ,__snake_case )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__lowerCAmelCase : Tuple = mask_heads(__snake_case ,__snake_case ,__snake_case )
prune_heads(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if __name__ == "__main__":
main()
| 58
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : List[str] = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 58
| 1
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
lowercase__: Any = timeit.default_timer()
lowercase__: int = func(*__UpperCAmelCase , **__UpperCAmelCase )
lowercase__: Any = timeit.default_timer() - starttime
return delta
lowercase__: Dict = func.__name__
return wrapper
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=None ) -> List[Any]:
lowercase__: Optional[Any] = []
lowercase__: Any = seq_shapes or {}
for i in range(__UpperCAmelCase ):
lowercase__: Any = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
lowercase__: Dict = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
lowercase__: Any = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowercase__: Optional[Any] = np.random.randint(1_0 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
lowercase__: Optional[int] = v.feature
lowercase__: Any = seq_shapes[k]
lowercase__: List[str] = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
lowercase__: Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=None ) -> List[str]:
lowercase__: Any = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
lowercase__: int = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
lowercase__, lowercase__: Any = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
lowercase__: int = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
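# Minimal usage sketch (the feature spec and the readable function names are
# illustrative assumptions; the defs above are name-mangled):
#
#   features = datasets.Features({"text": datasets.Value("string")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)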
| 177
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
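# Worked example (sketch): for inductance = 1 H and capacitance = 1 F the
# function returns ("Resonant frequency", 1 / (2 * pi)) ≈ 0.15915 Hz.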
| 177
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ ={
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 325
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 1
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
a__ : List[str] = 3_0_0 # TEMPERATURE (unit = K)
def _UpperCamelCase ( __A , __A , __A , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
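# Note (not original code): the expression above is the standard p-n junction
# built-in potential V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2), with T
# fixed at 300 K and the division by the electron-volt constant converting
# joules to volts.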
| 80
|
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
) -> None:
    """Save the token length of each example so the dataset can batch by length."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
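
# Typical invocation through fire (paths here are placeholders):
#   python save_len_file.py facebook/bart-large-cnn ./cnn_dm
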
def gray_code(bit_count: int) -> list:
    """Return the gray code sequence for the given number of bits, as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the gray code sequence as binary strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
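
# Example: gray_code(2) yields [0, 1, 3, 2], i.e. the bit strings
# 00, 01, 11, 10, where consecutive entries differ in exactly one bit.
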
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
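
# A quick way to run just these extractor tests from a datasets checkout
# (the path is an assumption, adjust to wherever this file lives):
#   python -m pytest tests/test_extract.py -q
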
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
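
# Example: solve(13, 2) == 1, since 13 = 2**2 + 3**2 is the only way to write
# 13 as a sum of squares of distinct natural numbers.
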
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
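
# Taken together, these tests pin down the interpreter's contract:
# evaluate(code, tools, state=state) executes restricted Python, mutates
# `state` in place, and returns the value of the last expression or assignment.
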
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'''
_CITATION = '''\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'''
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between a prediction and a ground truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy from segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below `limit` expressible as prime^2 + prime^3 + prime^4."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"{solution() = }")