| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class DepthFirstSearch:
    def __init__(self, tree):
        self.tree = tree

    def depth_first_search(self, node):
        # Recursively sum the values of every node reachable from `node`.
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
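
# A minimal usage sketch for the classes above (the tree shape and values are
# illustrative assumptions, not part of the original module):
if __name__ == "__main__":
    example_root = Node(10)
    example_root.left = Node(5)
    example_root.right = Node(-3)
    print(next(iter(DepthFirstSearch(example_root))))  # prints 12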
| 101 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
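
# A hedged usage sketch (the checkpoint name is taken from the archive map
# above; the OnnxConfig API is from transformers.onnx):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print(sorted(dummy_inputs))  # input_ids, attention_mask and the decoder_* tensors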
| 292 | 0 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCAmelCase :
'''simple docstring'''
pass
| 102 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def A__ ( UpperCamelCase ):
A = [False] * len(UpperCamelCase )
A = [-1] * len(UpperCamelCase )
def dfs(UpperCamelCase , UpperCamelCase ):
A = True
A = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase , 1 - c )
for i in range(len(UpperCamelCase ) ):
if not visited[i]:
dfs(UpperCamelCase , 0 )
for i in range(len(UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
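
# Illustrative counter-example: a triangle contains an odd cycle, so it is not
# bipartite and the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False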
| 292 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MCLIPModel(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings, weighting each position by the attention mask.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
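
# A hedged usage sketch (shapes are illustrative; a real M-CLIP checkpoint is
# not named in this file, so the model below is randomly initialised):
if __name__ == "__main__":
    config = MCLIPConfig(transformerDimSize=768)  # must match the base hidden size
    model = MCLIPModel(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 8))
    attention_mask = torch.ones_like(input_ids)
    projected, pooled = model(input_ids, attention_mask)
    print(projected.shape, pooled.shape)  # torch.Size([2, 768]) twice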
| 103 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
A = 1.0 if scale is None else scale
A = 0.0 if loc is None else loc
super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )
@property
def lowerCamelCase ( self :Any ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCamelCase ( self :Optional[int] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCamelCase ( self :Dict ):
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
super().__init__(**__UpperCamelCase )
A = args_dim
A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
A = domain_map
def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
A = [proj(__UpperCamelCase ) for proj in self.proj]
return self.domain_map(*__UpperCamelCase )
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int ):
super().__init__()
A = function
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
return self.function(__UpperCamelCase , *__UpperCamelCase )
class _UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :Any , __UpperCamelCase :int = 1 ):
A = dim
A = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
if self.dim == 1:
return self.distribution_class(*__UpperCamelCase )
else:
return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
A = self._base_distribution(__UpperCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )
@property
def lowerCamelCase ( self :List[Any] ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCamelCase ( self :Tuple ):
return len(self.event_shape )
@property
def lowerCamelCase ( self :int ):
return 0.0
def lowerCamelCase ( self :str , __UpperCamelCase :int ):
return ParameterProjection(
in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase = StudentT
@classmethod
def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
A = 2.0 + cls.squareplus(__UpperCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"loc": 1, "scale": 1}
UpperCamelCase = Normal
@classmethod
def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"total_count": 1, "logits": 1}
UpperCamelCase = NegativeBinomial
@classmethod
def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
A, A = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
else:
return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
A, A = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
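
# A hedged usage sketch: project 32 hidden features to Student-T parameters and
# draw a sample (shapes are illustrative):
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    distr = output.distribution(projection(torch.randn(4, 32)))
    print(distr.sample().shape)  # torch.Size([4])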
| 292 | 0 |
"""Utility that keeps `# Copied from` code blocks in sync across diffusers."""
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
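
# Illustrative annotations these patterns match (module and class names are made
# up for the example):
#
#   # Copied from diffusers.models.unet_2d.UNet2DModel
#   # Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->UNet3D
#   # Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->UNet3D all-casing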

def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply black formatting (and docstring styling) to a code block."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 104 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
UpperCamelCase = None
def lowerCamelCase ( self :List[Any] ):
A = self.feature_extraction_class(**self.feat_extract_dict )
A = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Tuple ):
A = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
| 292 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class __UpperCamelCase :
def __init__( self ) -> None:
a : list[Any] = []
a : int = 0
a : int = 0
def __a ( self ) -> bool:
return self.head == self.tail
def __a ( self , lowerCAmelCase__ ) -> None:
self.data.append(lowerCAmelCase__ )
a : List[Any] = self.tail + 1
def __a ( self ) -> Any:
a : int = self.data[self.head]
a : Union[str, Any] = self.head + 1
return ret
def __a ( self ) -> int:
return self.tail - self.head
def __a ( self ) -> None:
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ ) -> None:
a : Dict = data
a : MyNode | None = None
a : MyNode | None = None
a : int = 1
def __a ( self ) -> Any:
return self.data
def __a ( self ) -> MyNode | None:
return self.left
def __a ( self ) -> MyNode | None:
return self.right
def __a ( self ) -> int:
return self.height
def __a ( self , lowerCAmelCase__ ) -> None:
a : Optional[Any] = data
def __a ( self , lowerCAmelCase__ ) -> None:
a : str = node
def __a ( self , lowerCAmelCase__ ) -> None:
a : int = node
def __a ( self , lowerCAmelCase__ ) -> None:
a : List[Any] = height
def _SCREAMING_SNAKE_CASE ( _lowercase : MyNode | None ) ->int:
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
if a > b:
return a
return b
def _SCREAMING_SNAKE_CASE ( _lowercase : MyNode ) ->MyNode:
'''simple docstring'''
print("left rotation node:" , node.get_data() )
a : List[Any] = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_lowercase )
a : Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowercase )
a : int = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowercase )
return ret
def _SCREAMING_SNAKE_CASE ( _lowercase : MyNode ) ->MyNode:
'''simple docstring'''
print("right rotation node:" , node.get_data() )
a : Tuple = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_lowercase )
a : Dict = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowercase )
a : List[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowercase )
return ret
def _SCREAMING_SNAKE_CASE ( _lowercase : MyNode ) ->MyNode:
'''simple docstring'''
a : str = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_lowercase ) )
return right_rotation(_lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : MyNode ) ->MyNode:
'''simple docstring'''
a : int = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_lowercase ) )
return left_rotation(_lowercase )
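
# Illustrative shape of the left-right (LR) compound rotation above; the
# right-left (RL) case is the mirror image:
#
#       A              A            C
#      /              /            / \
#     B      -->     C     -->    B   A
#      \            /
#       C          B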

def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # Rebalance if the deletion left the two subtree heights differing by two.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)

    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 105 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowerCamelCase ( self :List[str] ):
super().setUp()
def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = "永和服装饰品有限公司,今天天气非常好"
A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCamelCase ( self :int ):
A = self.get_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :str ):
A = self.get_rust_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Tuple ):
pass
def lowerCamelCase ( self :List[str] ):
pass
| 292 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = ["input_values", "padding_mask"]
def __init__( self : int ,lowercase_ : int = 1 ,lowercase_ : int = 2_4_0_0_0 ,lowercase_ : float = 0.0 ,lowercase_ : float = None ,lowercase_ : float = None ,**lowercase_ : Tuple ,):
super().__init__(feature_size=lowercase_ ,sampling_rate=lowercase_ ,padding_value=lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Optional[Any] = chunk_length_s
lowerCAmelCase__ : List[Any] = overlap
@property
def __lowerCAmelCase ( self : Tuple ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self : Optional[int] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : str ,lowercase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowercase_ : Optional[Union[bool, str, PaddingStrategy]] = None ,lowercase_ : Optional[bool] = False ,lowercase_ : Optional[int] = None ,lowercase_ : Optional[Union[str, TensorType]] = None ,lowercase_ : Optional[int] = None ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : int = bool(
isinstance(lowercase_ ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowerCAmelCase__ : str = [np.asarray(lowercase_ ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(lowercase_ ,np.ndarray ):
lowerCAmelCase__ : List[Any] = np.asarray(lowercase_ ,dtype=np.floataa )
elif isinstance(lowercase_ ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ : Tuple = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ : Dict = [np.asarray(lowercase_ ).T]
# verify inputs are valid
for idx, example in enumerate(lowercase_ ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : int = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowerCAmelCase__ : Optional[int] = min(array.shape[0] for array in raw_audio )
lowerCAmelCase__ : Any = int(np.floor(max_length / self.chunk_stride ) )
lowerCAmelCase__ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowerCAmelCase__ : Union[str, Any] = max(array.shape[0] for array in raw_audio )
lowerCAmelCase__ : Dict = int(np.ceil(max_length / self.chunk_stride ) )
lowerCAmelCase__ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowerCAmelCase__ : str = '''max_length'''
else:
lowerCAmelCase__ : Tuple = input_values
# normal padding on batch
if padded_inputs is None:
lowerCAmelCase__ : List[str] = self.pad(
lowercase_ ,max_length=lowercase_ ,truncation=lowercase_ ,padding=lowercase_ ,return_attention_mask=lowercase_ ,)
if padding:
lowerCAmelCase__ : Optional[int] = padded_inputs.pop('''attention_mask''' )
lowerCAmelCase__ : Union[str, Any] = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
lowerCAmelCase__ : Optional[int] = example[..., None]
input_values.append(example.T )
lowerCAmelCase__ : Optional[int] = input_values
if return_tensors is not None:
lowerCAmelCase__ : int = padded_inputs.convert_to_tensors(lowercase_ )
return padded_inputs
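
# Worked example of the chunking arithmetic above (values are illustrative):
# with chunk_length_s=1.0, sampling_rate=24000 and overlap=0.5, chunk_length is
# 24000 samples and chunk_stride is max(1, int(0.5 * 24000)) = 12000 samples.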
| 106 |
"""simple docstring"""
def A__ ( UpperCamelCase , UpperCamelCase = False ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
A = F"Expected string as input, found {type(UpperCamelCase )}"
raise ValueError(UpperCamelCase )
if not isinstance(UpperCamelCase , UpperCamelCase ):
A = F"Expected boolean as use_pascal parameter, found {type(UpperCamelCase )}"
raise ValueError(UpperCamelCase )
A = input_str.split("_" )
A = 0 if use_pascal else 1
A = words[start_index:]
A = [word[0].upper() + word[1:] for word in words_to_capitalize]
A = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
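
# Illustrative calls (inputs chosen for the example):
if __name__ == "__main__":
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString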
| 292 | 0 |
"""Tabu search for an approximate travelling-salesman solution."""
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
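
# The input file format is inferred from generate_neighbours above: one weighted
# edge per line, "<node> <node> <distance>", with single-character node names
# (generate_first_solution reads one character as the start node), e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
# Illustrative invocation: python tabu_search.py -f tabu_data.txt -i 4 -s 3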
| 107 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ):
A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
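
# Worked example: downscale_height_and_width(512, 512, scale_factor=8) returns
# (64, 64); a height of 520 rounds up, since (520 // 64 + 1) * 8 = 72.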

class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 292 | 0 |
"""simple docstring"""
lowerCAmelCase__ = 8.314462 # Unit - J mol-1 K-1
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
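
# Illustrative check (values chosen for the example): one mole at 300 K in a
# 0.0245 m^3 vessel is roughly one atmosphere.
if __name__ == "__main__":
    print(f"{pressure_of_gas_system(1, 300, 0.0245):.0f} Pa")  # ~101810 Pa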
| 108 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
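        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so
        # seq_length = 226.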
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A: str = logging.get_logger(__name__)
A: str = "▁"
A: Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
A: Union[str, Any] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
A: Dict = {
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
A: List[Any] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : str = VOCAB_FILES_NAMES
__lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : int = ['input_ids', 'attention_mask']
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase : Any = legacy_behaviour
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase : Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase : str = 1
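        # e.g. the spm piece "an" has spm id 3; adding fairseq_offset (1) yields fairseq id 4,
        # matching the alignment table above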
UpperCAmelCase : Optional[Any] = len(self.sp_model )
UpperCAmelCase : Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE )
}
UpperCAmelCase : Any = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase : Optional[int] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase : List[str] = src_lang if src_lang is not None else """eng_Latn"""
UpperCAmelCase : str = self.lang_code_to_id[self._src_lang]
UpperCAmelCase : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int = self.__dict__.copy()
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = [1] * len(self.prefix_tokens )
UpperCAmelCase : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase : Any = src_lang
UpperCAmelCase : Dict = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Any = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
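        # e.g. an out-of-vocabulary piece makes PieceToId return 0, which is falsy, so the
        # fairseq-aligned unk id is returned instead of 0 + fairseq_offset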
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : int = """""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : int = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi:
UpperCAmelCase : Any = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "eng_Latn" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "fra_Latn" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : List[str] = src_lang
UpperCAmelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase : List[Any] = [self.cur_lang_code]
UpperCAmelCase : Optional[Any] = [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCAmelCase : Any = []
UpperCAmelCase : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase : Tuple = [self.cur_lang_code]
UpperCAmelCase : Optional[int] = [self.eos_token_id]
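# Minimal usage sketch, assuming this class corresponds to the published NllbTokenizer API:
# tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M",
#                                     src_lang="eng_Latn", tgt_lang="fra_Latn")
# ids = tok("Hello world").input_ids
# With legacy_behaviour=True the source text is suffixed with [eos, src_lang_code] (see
# set_src_lang_special_tokens above); otherwise the language code is prefixed instead.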
| 109 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''vivit'''
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = num_frames
A = tubelet_size
A = num_channels
A = qkv_bias
super().__init__(**__UpperCamelCase )
| 292 | 0 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> List[str]:
"""simple docstring"""
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCAmelCase_ : Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCAmelCase_ : str = 1
if upper_limit > 0:
lowerCAmelCase_ : int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCAmelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
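# e.g. an upper_limit of 5 yields [1, 1, 2, 5, 14, 42]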
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
lowercase__ : Union[str, Any] = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 224 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 292 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
__lowerCAmelCase : Union[str, Any] = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
__lowerCAmelCase : Dict = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
__lowerCAmelCase : List[str] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__UpperCamelCase , __UpperCamelCase , sample_weight=__UpperCamelCase ) ),
}
| 88 |
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
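# e.g. for num_rows=4 the function above returns [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]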
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
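# e.g. at row_index=4: temp_row=[0, 1, 3, 3, 1, 0], distinct_elements=3, first half
# [1, 4, 6], mirrored second half [4, 1], giving the row [1, 4, 6, 4, 1]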
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
"""simple docstring"""
from __future__ import annotations
a = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowercase_ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : dict[str, list[str]] , _UpperCAmelCase : str ):
_A = graph
# mapping node to its parent in resulting breadth first tree
_A = {}
_A = source_vertex
def lowerCAmelCase_ ( self : str ):
_A = {self.source_vertex}
_A = None
_A = [self.source_vertex] # first in first out queue
while queue:
_A = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__UpperCamelCase )
_A = vertex
queue.append(__UpperCamelCase )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
_A = self.parent.get(__UpperCamelCase )
if target_vertex_parent is None:
_A = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(__UpperCamelCase )
return self.shortest_path(__UpperCamelCase ) + F'''->{target_vertex}'''
if __name__ == "__main__":
a = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
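# Expected output for the graph above: "G->C->A->B->D", then "G" (source equals target),
# and finally a ValueError for "Foo", which never enters the BFS tree.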
| 315 |
"""simple docstring"""
import math
import sys
def A__ ( UpperCamelCase ):
A = ""
try:
with open(UpperCamelCase , "rb" ) as binary_file:
A = binary_file.read()
for dat in data:
A = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = {"0": "0", "1": "1"}
A, A = "", ""
A = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A = lexicon[curr_string]
result += last_match_id
A = last_match_id + "0"
if math.loga(UpperCamelCase ).is_integer():
A = {}
for curr_key in list(UpperCamelCase ):
A = lexicon.pop(UpperCamelCase )
A = new_lex
A = last_match_id + "1"
index += 1
A = ""
return result
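# Dictionary growth mirrors LZ-style decompression: each emitted code spawns a
# "0"-extended entry, and once the running index reaches a power of two every existing
# key gains one leading bit so code lengths stay in sync with the compressor.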
def A__ ( UpperCamelCase , UpperCamelCase ):
A = 8
try:
with open(UpperCamelCase , "wb" ) as opened_file:
A = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A = data_bits[counter:]
A = data_bits[counter + 1 :]
return data_bits
def A__ ( UpperCamelCase , UpperCamelCase ):
A = read_file_binary(UpperCamelCase )
A = remove_prefix(UpperCamelCase )
A = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 292 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
UpperCamelCase = s_dict.pop(_SCREAMING_SNAKE_CASE )
elif "subsample" in key:
UpperCamelCase = s_dict.pop(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
UpperCamelCase = emb.weight.data
return lin_layer
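# The returned linear layer shares emb.weight.data, so the output projection is tied
# to the token embedding matrix rather than allocating a separate weight.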
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
UpperCamelCase = mam_aaa["args"]
UpperCamelCase = mam_aaa["model"]
UpperCamelCase = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
rename_keys(_SCREAMING_SNAKE_CASE )
UpperCamelCase = state_dict["decoder.embed_tokens.weight"].shape[0]
UpperCamelCase = args.share_decoder_input_output_embed
UpperCamelCase = [int(_SCREAMING_SNAKE_CASE ) for i in args.conv_kernel_sizes.split("," )]
UpperCamelCase = SpeechaTextConfig(
vocab_size=_SCREAMING_SNAKE_CASE , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(_SCREAMING_SNAKE_CASE ) , conv_channels=args.conv_channels , conv_kernel_sizes=_SCREAMING_SNAKE_CASE , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=200 , use_cache=_SCREAMING_SNAKE_CASE , decoder_start_token_id=2 , early_stopping=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = SpeechaTextForConditionalGeneration(_SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
UpperCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCamelCase = lm_head_weights
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase__ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 153 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ):
A = name
A = val
def __str__( self :str ):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
return self.val < other.val
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ):
A = {}
A = {}
A = self.build_heap(__UpperCamelCase )
def __getitem__( self :int , __UpperCamelCase :Optional[int] ):
return self.get_value(__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ):
return (idx - 1) // 2
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
return idx * 2 + 1
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ):
return idx * 2 + 2
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ):
return self.heap_dict[key]
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
A = len(__UpperCamelCase ) - 1
A = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
A = idx
A = i.val
for i in range(__UpperCamelCase , -1 , -1 ):
self.sift_down(__UpperCamelCase , __UpperCamelCase )
return array
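        # Bottom-up construction: sift_down runs from the last parent index down to the root,
        # heapifying the whole array in O(n) instead of n successive O(log n) inserts.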
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ):
while True:
A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
A = self.get_right_child_idx(__UpperCamelCase )
A = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
A = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
A = r
if smallest != idx:
A, A = array[smallest], array[idx]
                A, A = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
A = smallest
else:
break
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ):
A = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
A, A = self.heap[idx], self.heap[p]
A, A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A = p
A = self.get_parent_idx(__UpperCamelCase )
def lowerCamelCase ( self :Any ):
return self.heap[0]
def lowerCamelCase ( self :Tuple ):
A, A = self.heap[-1], self.heap[0]
A, A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ):
self.heap.append(__UpperCamelCase )
A = len(self.heap ) - 1
A = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase ( self :Tuple ):
return len(self.heap ) == 0
def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
A = new_value
A = new_value
self.sift_up(self.idx_of_element[node] )
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
import baseaa
def __snake_case ( _UpperCAmelCase ):
return baseaa.aaaencode(string.encode('''utf-8''' ) )
def __snake_case ( _UpperCAmelCase ):
return baseaa.aaadecode(_UpperCAmelCase ).decode('''utf-8''' )
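# Round-trip property (assuming baseaa/aaaencode/aaadecode stand for base64's Ascii85
# helpers a85encode/a85decode): decoding an encoded string returns the original text.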
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
"""simple docstring"""
from __future__ import annotations
_snake_case : str = []
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( UpperCamelCase , UpperCamelCase ):
if row >= len(UpperCamelCase ):
solution.append(UpperCamelCase )
printboard(UpperCamelCase )
print()
return True
for i in range(len(UpperCamelCase ) ):
if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = 1
solve(UpperCamelCase , row + 1 )
A = 0
return False
def A__ ( UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
for j in range(len(UpperCamelCase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
_snake_case : List[str] = 8
_snake_case : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
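# For n = 8 this backtracking search prints all 92 solutions; note that `solution`
# appends references to the single mutable `board`, so only its length is meaningful.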
| 292 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE : Dict = {'target_lang': 'fi', 'source_lang': 'en'}
_SCREAMING_SNAKE_CASE : List[str] = '>>zh<<'
_SCREAMING_SNAKE_CASE : List[Any] = 'Helsinki-NLP/'
if is_torch_available():
_SCREAMING_SNAKE_CASE : int = 'pt'
elif is_tf_available():
_SCREAMING_SNAKE_CASE : Tuple = 'tf'
else:
_SCREAMING_SNAKE_CASE : str = 'jax'
@require_sentencepiece
class A__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MarianTokenizer
__magic_name__ = False
__magic_name__ = True
def a_ ( self ):
super().setUp()
snake_case = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
snake_case = Path(self.tmpdirname )
save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self , **__snake_case ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def a_ ( self , __snake_case ):
return (
"This is a test",
"This is a test",
)
def a_ ( self ):
snake_case = '''</s>'''
snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def a_ ( self ):
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCamelCase ) , 9 )
def a_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a_ ( self ):
snake_case = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
snake_case = en_de_tokenizer(['''I am a small frog'''] , return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
snake_case = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(__UpperCamelCase , batch.input_ids[0] )
snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__UpperCamelCase )
snake_case = [x.name for x in Path(__UpperCamelCase ).glob('''*''' )]
self.assertIn('''source.spm''' , __UpperCamelCase )
MarianTokenizer.from_pretrained(__UpperCamelCase )
def a_ ( self ):
snake_case = self.get_tokenizer()
snake_case = tok(
['''I am a small frog''' * 1_0_0_0, '''I am a small frog'''] , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def a_ ( self ):
snake_case = self.get_tokenizer()
snake_case = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=__UpperCamelCase , return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def a_ ( self ):
# fmt: off
snake_case = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def a_ ( self ):
snake_case = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
snake_case = '''Tämä on testi'''
snake_case = '''This is a test'''
snake_case = [7_6, 7, 2_0_4_7, 2]
snake_case = [6_9, 1_2, 1_1, 9_4_0, 2]
snake_case = tokenizer(__UpperCamelCase ).input_ids
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case = tokenizer(text_target=__UpperCamelCase ).input_ids
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
snake_case = tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 127 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def A__ ( UpperCamelCase ):
A = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self ):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
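    # A minimal end-to-end sketch mirroring the slow test above (a sketch, not part
    # of the test suite; assumes network access, with "Intel/dpt-large" being the
    # checkpoint already used in test_large_model_pt):
    #
    #   from transformers import pipeline
    #   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    #   outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    #   # outputs["depth"] is a PIL.Image, outputs["predicted_depth"] a torch.Tensor
    #   print(hashimage(outputs["depth"]), outputs["predicted_depth"].shape)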
| 292 | 0 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float , deriv: bool = False ) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True
    (the derivative form expects the already-activated value)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int , number_propagations: int ) -> float:
    """Train a single weight so the output approaches `expected` (in percent)."""
    # Random weight
    weight = float(2 * (random.randint(1 , 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1 , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 1_00


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
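# A quick sanity check of the pieces above (a sketch; the final value is
# seed-dependent, so only loose bounds are asserted):
def _smoke_test() -> None:
    assert sigmoid_function(0.0) == 0.5
    # the derivative form takes the already-activated value as input
    assert sigmoid_function(0.5, deriv=True) == 0.25
    random.seed(0)
    result = forward_propagation(32, 450_000)
    assert 31 < result < 35  # converges toward the expected value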
| 288 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase ):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
@cached_property
    def tokenizer(self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected(self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words

    def translate_src_text(self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation(self ):
        self._assert_generated_batch_equal_expected()
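    # A standalone usage sketch of the same generate/decode round trip (assumes
    # network access to download google/pegasus-xsum; num_beams and use_cache
    # mirror translate_src_text above):
    #
    #   tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    #   model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    #   batch = tok(["Some long article text ..."], padding=True, return_tensors="tf")
    #   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    #   print(tok.batch_decode(ids.numpy(), skip_special_tokens=True))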
| 292 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin , PretrainedConfig ):
    model_type = "convnextv2"

    def __init__(
        self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
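# A small usage sketch for the config above (illustrative sizes, not a published
# checkpoint): out_features selects which stages a backbone returns, and
# out_indices is derived from their positions in stage_names.
if __name__ == "__main__":
    config = ConvNextV2Config(depths=[2, 2, 6, 2] , out_features=["stage2", "stage4"] )
    print(config.stage_names )                    # ['stem', 'stage1', ..., 'stage4']
    print(config.out_features , config.out_indices )  # ['stage2', 'stage4'] [2, 4]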
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
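# The discount column above is (MRP - price) / MRP * 100; a quick worked example
# with illustrative numbers (not real listings):
def _discount_percent(mrp: float , price: float ) -> float:
    return (mrp - price) / mrp * 100

assert _discount_percent(1000.0 , 750.0 ) == 25.0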
| 292 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )


def solution() -> int:
    return sum(
        number
        for number in range(1000 , 100_0000 )
        if number == digits_fifth_powers_sum(number ) )


if __name__ == "__main__":
    print(solution())
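# Sanity check with a known fifth-power digit sum (independent of the search):
# 4151 = 4**5 + 1**5 + 5**5 + 1**5 = 1024 + 1 + 3125 + 1
assert digits_fifth_powers_sum(4151) == 4151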
| 207 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline ):
    def __init__(
        self , speech_model: WhisperForConditionalGeneration , speech_processor: WhisperProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )

    def enable_attention_slicing(self , slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing(self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__(
        self , audio , sampling_rate=16_000 , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="pt" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=48_00_00 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
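# The guidance step above blends the unconditional and text-conditioned noise
# predictions; a standalone sketch of the classifier-free-guidance formula on
# dummy tensors (independent of the pipeline, for illustration only):
def _cfg_demo() -> None:
    uncond = torch.zeros(1 , 4 , 8 , 8 )
    text = torch.ones(1 , 4 , 8 , 8 )
    guidance_scale = 7.5
    guided = uncond + guidance_scale * (text - uncond)
    assert torch.allclose(guided , torch.full((1, 4, 8, 8) , 7.5 ) )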
| 292 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1

    def test_pipeline_pndm(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def test_pipeline_lms(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def test_pipeline_euler(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def test_pipeline_euler_ancestral(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def test_pipeline_dpm_multistep(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase ):
    @property
    def gpu_provider(self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def test_inference_k_lms(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
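    # These tests pass np.random.RandomState(0) as the generator so sampled latents
    # are reproducible across runs; a minimal illustration of that determinism
    # (a sketch added alongside the real tests):
    def test_generator_determinism_sketch(self ):
        a = np.random.RandomState(0 ).standard_normal(3 )
        b = np.random.RandomState(0 ).standard_normal(3 )
        assert np.array_equal(a , b )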
| 348 |
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        train_parser = parser.add_parser(
            'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=__UpperCamelCase , required=__UpperCamelCase , help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' , type=__UpperCamelCase , required=__UpperCamelCase , help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' , type=__UpperCamelCase , default='' , help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' , type=__UpperCamelCase , default=__UpperCamelCase , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__(self , model_type: str , tf_checkpoint: str , pytorch_dump_output: str , config: str , finetuning_task_name: str , *args , ):
        self._logger = logging.get_logger('transformers-cli/converting' )
        self._logger.info(f'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
__lowercase= self._tf_checkpoint
__lowercase= ''
else:
__lowercase= self._tf_checkpoint
__lowercase= ''
convert_transfo_xl_checkpoint_to_pytorch(
__UpperCamelCase , self._config , self._pytorch_dump_output , __UpperCamelCase )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
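    # A usage sketch for this command (flags are the ones registered above; the
    # paths are placeholders, not real files):
    #
    #   transformers-cli convert --model_type bert \
    #       --tf_checkpoint /path/to/bert_model.ckpt \
    #       --config /path/to/bert_config.json \
    #       --pytorch_dump_output /path/to/pytorch_dump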
| 295 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys(s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers" , "layers" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("subsample" , "conv" )] = s_dict.pop(key )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    m2m_100 = torch.load(checkpoint_path , map_location="cpu" )
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split("," )]
    config = Speech2TextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = Speech2TextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
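# make_linear_from_emb ties the LM head to the embedding matrix by reusing its
# storage; a tiny demonstration with illustrative sizes (not part of the script):
def _tying_demo() -> None:
    emb = nn.Embedding(10 , 4 )
    lm_head = make_linear_from_emb(emb )
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage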
| 292 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig ):
    model_type = "sew-d"

    def __init__(
        self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1E-7 , feature_layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio(self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
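# inputs_to_logits_ratio above is just the product of the conv strides, i.e. the
# feature extractor's total audio downsampling factor; for the default strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 = 320 input samples
# per output frame:
if __name__ == "__main__":
    _strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    print(functools.reduce(operator.mul , _strides , 1 ) )  # 320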
| 224 |
"""simple docstring"""
from math import isqrt, log10


def calculate_prime_numbers(max_number: int ) -> list[int]:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]


def solution(base: int = 800_800 , degree: int = 800_800 ) -> int:
    """Count hybrid integers C(p, q) = p**q * q**p (p < q primes) <= base**degree."""
    upper_bound = degree * log10(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log10(prime_numbers[left] )
            + prime_numbers[left] * log10(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 292 | 0 |
class Node:
    def __init__(self , name , val ):
        self.name = name
        self.val = val

    def __str__(self ):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self , other ):
        return self.val < other.val
class MinHeap:
    def __init__(self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )

    def __getitem__(self , key ):
        return self.get_value(key )

    def get_parent_idx(self , idx ):
        return (idx - 1) // 2

    def get_left_child_idx(self , idx ):
        return idx * 2 + 1

    def get_right_child_idx(self , idx ):
        return idx * 2 + 2

    def get_value(self , key ):
        return self.heap_dict[key]

    def build_heap(self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array

    def sift_down(self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx] , array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]] , self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p] , self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] , self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )

    def peek(self ):
        return self.heap[0]

    def remove(self ):
        self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x

    def insert(self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )

    def is_empty(self ):
        return len(self.heap ) == 0

    def decrease_key(self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)

print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
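# After decrease_key, the min-heap invariant can be verified directly: every
# parent is <= its children (a small sketch using plain index arithmetic):
for _i, _node in enumerate(my_min_heap.heap):
    for _child in (2 * _i + 1, 2 * _i + 2):
        if _child < len(my_min_heap.heap):
            assert _node.val <= my_min_heap.heap[_child].val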
| 88 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
elif self.task == "causal-lm":
# TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 292 | 0 |
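The ONNX config above only declares which tensor axes are dynamic; the export itself happens elsewhere. Below is a minimal sketch of how those `{index: axis_name}` mappings are consumed, using a toy embedding model (an assumption for illustration, not Marian) and `torch.onnx.export`:

import torch
from torch import nn

# Toy "encoder" standing in for a real seq2seq model.
model = nn.Sequential(nn.Embedding(100, 16), nn.Linear(16, 16))
# Fixed dummy dims (batch=2, seq=8) avoid ONNX constant-folding the shapes away,
# which is exactly why compute_effective_axis_dimension above forces fixed sizes.
dummy_input_ids = torch.randint(0, 100, (2, 8))

torch.onnx.export(
    model,
    (dummy_input_ids,),
    "toy_encoder.onnx",
    input_names=["input_ids"],
    output_names=["last_hidden_state"],
    # the same {index: axis_name} convention the OnnxConfig produces
    dynamic_axes={
        "input_ids": {0: "batch", 1: "encoder_sequence"},
        "last_hidden_state": {0: "batch", 1: "encoder_sequence"},
    },
)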
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    """simple docstring"""
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2_048, decoder_ffn_dim=2_048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """simple docstring"""
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(F"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(F"jozhang97/{model_name}")
        processor.push_to_hub(F"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 153 |
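The conversion script above is mostly a long list of (old key, new key) pairs applied to a checkpoint's state dict. Here is a compact, self-contained sketch of that renaming step in isolation; the toy state dict and key names are made up for illustration:

from collections import OrderedDict

import torch

def remap_state_dict(state_dict, rename_pairs):
    # apply (old_name, new_name) pairs in order; keys without a mapping pass through unchanged
    remapped = OrderedDict(state_dict)
    for old, new in rename_pairs:
        if old in remapped:
            remapped[new] = remapped.pop(old)
    return remapped

toy_sd = {"backbone.proj.weight": torch.zeros(3, 3), "head.bias": torch.zeros(3)}
pairs = [("backbone.proj.weight", "model.projection.weight")]
print(list(remap_state_dict(toy_sd, pairs).keys()))
# ['head.bias', 'model.projection.weight']

Keeping the mapping as explicit pairs (rather than regex rewrites) makes such scripts verbose but easy to audit against the original checkpoint layout.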
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    # color[v] is 0 or 1 once v has been visited, -1 before
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True

# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 292 | 0 |
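The DFS colouring above recurses and only looks for conflicting edges in a second pass. A BFS variant, a common alternative rather than part of the file above, reports the conflicting edge as soon as it is seen and avoids recursion-depth limits on deep graphs:

from collections import deque

def check_bipartite_bfs(graph):
    # 2-color the graph level by level; an edge inside one color class
    # means an odd cycle, so the graph cannot be bipartite.
    color = [-1] * len(graph)
    for start in range(len(graph)):
        if color[start] != -1:
            continue  # already colored via an earlier component
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True

print(check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True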
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)
    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')
            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '''[,?.!\-\;\:\"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
| 49 |
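The script above delegates WER and CER to `datasets.load_metric`. For intuition, here is a self-contained sketch of word error rate as word-level Levenshtein distance normalised by reference length; the example strings are made up:

def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = minimum edits (substitutions/insertions/deletions) to turn
    # the first i reference words into the first j hypothesis words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[len(ref)][len(hyp)] / max(len(ref), 1)

print(word_error_rate("the cat sat", "the cat sat down"))  # one insertion over 3 words -> 0.333...

Character error rate is the same recurrence applied to characters instead of words.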
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 292 | 0 |
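The `AffineTransformed` wrapper above is a thin layer over `torch.distributions`. The standalone sketch below, with arbitrary loc/scale values chosen for illustration, shows the same construction and the scaling identities behind its `mean`/`variance` properties:

import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

# Wrap a base distribution with y = scale * x + loc, as the class above does.
base = Normal(loc=torch.zeros(3), scale=torch.ones(3))
affine = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=5.0)])

samples = affine.sample((10_000,))
# E[aX + b] = a E[X] + b and Var[aX + b] = a^2 Var[X], the identities used by
# the mean/variance properties above:
print(samples.mean(0))            # empirically close to 0 * 5 + 2 = 2
print(base.variance * 5.0**2)     # analytic variance of the transformed dist, 25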
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class A__ :
"""simple docstring"""
@staticmethod
def a_ ( *__snake_case , **__snake_case ):
pass
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            '''document-question-answering''', model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
        question = '''What is the placebo?'''
        examples = [
            {
                '''image''': load_image(image),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {'''score''': ANY(float), '''answer''': ANY(str), '''start''': ANY(int), '''end''': ANY(int)},
                    {'''score''': ANY(float), '''answer''': ANY(str), '''start''': ANY(int), '''end''': ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline('''document-question-answering''', model='''hf-internal-testing/tiny-random-layoutlmv2''')
        image = INVOICE_URL
        question = '''How many cats are there?'''

        expected_output = [
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a_ ( self ):
snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
snake_case = INVOICE_URL
snake_case = '''What is the invoice number?'''
snake_case = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a_ ( self ):
snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=5_0 , )
snake_case = INVOICE_URL
snake_case = '''What is the invoice number?'''
snake_case = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a_ ( self ):
snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , )
snake_case = INVOICE_URL
snake_case = '''What is the invoice number?'''
snake_case = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
]
]
* 2 , )
snake_case = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a_ ( self ):
snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , max_seq_len=5_0 , )
snake_case = INVOICE_URL
snake_case = '''What is the invoice number?'''
snake_case = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
snake_case = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
@slow
@require_torch
def a_ ( self ):
snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
snake_case = INVOICE_URL
snake_case = '''What is the invoice number?'''
snake_case = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a_ ( self ):
pass
| 127 |
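A minimal usage sketch for the pipeline exercised by these tests. The model id and invoice image URL are the public ones the tests themselves use; actually running this still requires the optional OCR dependency (pytesseract) the tests gate on:

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
url = "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"

# top_k=2 returns the two highest-scoring answer spans, mirroring the tests above
answers = dqa(image=url, question="What is the invoice number?", top_k=2)
for answer in answers:
    print(answer["score"], answer["answer"], answer["start"], answer["end"])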
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 292 | 0 |
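The mixin above asserts one invariant: serialising a feature extractor to JSON and loading it back must reproduce the same dict. The toy dataclass below (all names made up) demonstrates that round trip without any transformers dependency:

import json
import os
import tempfile
from dataclasses import asdict, dataclass

@dataclass
class ToyConfig:
    sampling_rate: int = 16_000
    feature_size: int = 80

    def to_json_string(self) -> str:
        return json.dumps(asdict(self), indent=2)

    @classmethod
    def from_json_file(cls, path):
        with open(path) as f:
            return cls(**json.load(f))

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "feat_extract.json")
    first = ToyConfig()
    with open(path, "w") as f:
        f.write(first.to_json_string())
    second = ToyConfig.from_json_file(path)
    assert asdict(first) == asdict(second)  # the round-trip invariant the mixin checks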
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
def lowercase ( self : List[str] ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__a = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__a = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
__a = False
__a = False
__a = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def lowercase ( self : Any ):
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def lowercase ( self : Optional[int] ):
pass
def lowercase ( self : List[str] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , tf.keras.layers.Layer ) )
def lowercase ( self : Union[str, Any] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__UpperCamelCase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 288 |
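The sequence lengths asserted by the tester above come from simple patch arithmetic; the snippet below is plain Python showing the numbers for the standard 224x224/16 configuration and for the half-size crop used by the `interpolate_pos_encoding` test:

# An image is cut into non-overlapping patches, plus one [CLS] token.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2     # 14 * 14 = 196
seq_length = num_patches + 1                      # 197 with the [CLS] token
print(seq_length)

# Halving the input side, as the interpolate_pos_encoding test does:
half = image_size // 2
print((half // patch_size) ** 2 + 1)              # 50 tokens for a 112x112 crop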
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowerCamelCase ( self :List[str] ):
super().setUp()
def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 292 | 0 |
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
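# Breadth-first search over whole paths: explore level by level and return the first (shortest) path from start to goal, or [] if none exists.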
def bfs_shortest_path ( graph , start , goal ):
explored = set()
# keep track of all the paths to be checked
queue = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
path = queue.pop(0 )
# get the last node from the path
node = path[-1]
if node not in explored:
neighbours = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
new_path = list(path )
new_path.append(neighbour )
queue.append(new_path )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(node )
# in case there's no path between the 2 nodes
return []
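# Distance-only variant: track hop counts instead of paths and return the number of edges on the shortest start-to-target route, or -1 if unreachable.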
def bfs_shortest_path_distance ( graph , start , target ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
queue = [start]
visited = set(start )
# Keep tab on distances from `start` node.
dist = {start: 0, target: -1}
while queue:
node = queue.pop(0 )
if node == target:
dist[target] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(adjacent )
queue.append(adjacent )
dist[adjacent] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 182 |
"""simple docstring"""
def snake_to_camel_case ( input_str , use_pascal = False ):
if not isinstance(input_str , str ):
msg = F"Expected string as input, found {type(input_str )}"
raise ValueError(msg )
if not isinstance(use_pascal , bool ):
msg = F"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
raise ValueError(msg )
words = input_str.split("_" )
start_index = 0 if use_pascal else 1
words_to_capitalize = words[start_index:]
capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
initial_word = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 292 | 0 |
def is_palindrome ( num ):
'''Return True if the non-negative integer num reads the same forwards and backwards.'''
if num < 0:
return False
num_copy = num
rev_num = 0
while num > 0:
rev_num = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
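# Helper below rounds the requested height/width up to dimensions the movq can handle: ceil-divide by scale_factor**2, then scale back by scale_factor.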
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ):
A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Any , __UpperCamelCase :UNet2DConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 292 | 0 |
from __future__ import annotations
class IIRFilter :
def __init__( self , order ):
self.order = order
# a_{0} ... a_{k}
self.a_coeffs = [1.0] + [0.0] * order
# b_{0} ... b_{k}
self.b_coeffs = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
self.input_history = [0.0] * self.order
# y[n-1] ... y[n-k]
self.output_history = [0.0] * self.order
def set_coefficients( self , a_coeffs , b_coeffs ):
if len(a_coeffs ) < self.order:
a_coeffs = [1.0, *a_coeffs]
if len(a_coeffs ) != self.order + 1:
error_message = (
f'''Expected a_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(a_coeffs )}'''
)
raise ValueError(error_message )
if len(b_coeffs ) != self.order + 1:
error_message = (
f'''Expected b_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(b_coeffs )}'''
)
raise ValueError(error_message )
self.a_coeffs = a_coeffs
self.b_coeffs = b_coeffs
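# process applies the direct form I difference equation: y[n] = (b0*x[n] + sum_i(b_i*x[n-i] - a_i*y[n-i])) / a0, then shifts the sample histories.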
def process( self , sample ):
result = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
self.input_history[1:] = self.input_history[:-1]
self.output_history[1:] = self.output_history[:-1]
self.input_history[0] = sample
self.output_history[0] = result
return result
| 348 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class A ( lowercase_ ):
UpperCamelCase_ : Any ='''xglm'''
UpperCamelCase_ : Any =['''past_key_values''']
UpperCamelCase_ : Any ={
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self , lowerCAmelCase=2_5_6_0_0_8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=2_4 , lowerCAmelCase=1_6 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= ffn_dim
__lowercase= num_layers
__lowercase= attention_heads
__lowercase= activation_function
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= layerdrop
__lowercase= init_std
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase= use_cache
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
| 295 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''vivit'''
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = num_frames
A = tubelet_size
A = num_channels
A = qkv_bias
super().__init__(**__UpperCamelCase )
| 292 | 0 |
"""simple docstring"""
def print_max_activities ( start : list , finish : list ) -> None:
"""Greedily print indices of a maximum set of non-overlapping activities (assumes activities are sorted by finish time)."""
n = len(finish )
print('The following activities are selected:' )
# The first activity is always selected
i = 0
print(i , end=',' )
# Consider rest of the activities
for j in range(n ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(j , end=',' )
i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : Any = [1, 3, 0, 5, 8, 5]
lowercase__ : Optional[int] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 224 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 292 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class UpperCAmelCase_ ( lowercase_ ):
'''simple docstring'''
a__ = """bridgetower_vision_model"""
def __init__( self : Tuple , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Union[str, Any]=288 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Dict=1E-05 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=False , **UpperCamelCase__ : Optional[int] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__UpperCamelCase )
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_channels
__magic_name__ = patch_size
__magic_name__ = image_size
__magic_name__ = initializer_factor
__magic_name__ = layer_norm_eps
__magic_name__ = stop_gradient
__magic_name__ = share_layernorm
__magic_name__ = remove_last_layer
@classmethod
def _lowercase ( cls : int , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ , __magic_name__ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
__magic_name__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class UpperCAmelCase_ ( lowercase_ ):
'''simple docstring'''
a__ = """bridgetower_text_model"""
def __init__( self : List[Any] , UpperCamelCase__ : Any=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=1 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : str=514 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Dict=1E-05 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Union[str, Any]=True , **UpperCamelCase__ : Any , ) -> str:
"""simple docstring"""
super().__init__(**__UpperCamelCase )
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_act
__magic_name__ = initializer_factor
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = layer_norm_eps
__magic_name__ = position_embedding_type
__magic_name__ = use_cache
__magic_name__ = pad_token_id
__magic_name__ = bos_token_id
__magic_name__ = eos_token_id
@classmethod
def _lowercase ( cls : Dict , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
__magic_name__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class UpperCAmelCase_ ( lowercase_ ):
'''simple docstring'''
a__ = """bridgetower"""
def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : Union[str, Any]=768 , UpperCamelCase__ : int=1 , UpperCamelCase__ : List[Any]=1E-05 , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Dict="add" , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Optional[Any]=6 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict , ) -> Any:
"""simple docstring"""
__magic_name__ = kwargs.pop("""text_config_dict""" , __UpperCamelCase )
__magic_name__ = kwargs.pop("""vision_config_dict""" , __UpperCamelCase )
super().__init__(**__UpperCamelCase )
__magic_name__ = share_cross_modal_transformer_layers
__magic_name__ = hidden_act
__magic_name__ = hidden_size
__magic_name__ = initializer_factor
__magic_name__ = layer_norm_eps
__magic_name__ = share_link_tower_layers
__magic_name__ = link_tower_type
__magic_name__ = num_attention_heads
__magic_name__ = num_hidden_layers
__magic_name__ = tie_word_embeddings
__magic_name__ = init_layernorm_from_vision_encoder
if text_config is None:
__magic_name__ = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
__magic_name__ = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
__magic_name__ = BridgeTowerTextConfig(**__UpperCamelCase )
__magic_name__ = BridgeTowerVisionConfig(**__UpperCamelCase )
@classmethod
def _lowercase ( cls : str , UpperCamelCase__ : BridgeTowerTextConfig , UpperCamelCase__ : BridgeTowerVisionConfig , **UpperCamelCase__ : int ) -> int:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCamelCase )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = copy.deepcopy(self.__dict__ )
__magic_name__ = self.text_config.to_dict()
__magic_name__ = self.vision_config.to_dict()
__magic_name__ = self.__class__.model_type
return output
| 88 |
"""simple docstring"""
def print_pascal_triangle ( num_rows ):
triangle = generate_pascal_triangle(num_rows )
for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def generate_pascal_triangle ( num_rows ):
if not isinstance(num_rows , int ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
triangle = []
for current_row_idx in range(num_rows ):
current_row = populate_current_row(triangle , current_row_idx )
triangle.append(current_row )
return triangle
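# Each row starts and ends with 1; every interior cell is the sum of the two cells above it.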
def populate_current_row ( triangle , current_row_idx ):
current_row = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
current_row[0], current_row[-1] = 1, 1
for current_col_idx in range(1 , current_row_idx ):
calculate_current_element(
triangle , current_row , current_row_idx , current_col_idx )
return current_row
def calculate_current_element ( triangle , current_row , current_row_idx , current_col_idx , ):
above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
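# Optimized variant below: derive each row from the previous one and mirror the first half, roughly halving the additions per row.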
def generate_pascal_triangle_optimized ( num_rows ):
if not isinstance(num_rows , int ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
result = [[1]]
for row_index in range(1 , num_rows ):
temp_row = [0] + result[-1] + [0]
row_length = row_index + 1
# Calculate the number of distinct elements in a row
distinct_elements = sum(divmod(row_length , 2 ) )
row_first_half = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
row_second_half = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
row = row_first_half + row_second_half
result.append(row )
return result
def benchmark ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func , value ) -> None:
call = F"{func.__name__}({value})"
timing = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covid19_stats ( url : str = "https://www.worldometers.info/coronavirus" ) -> dict:
'''Scrape the live counters (cases, deaths, recovered and panel statistics) from the Worldometers page.'''
soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
keys = soup.findAll('h1' )
values = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covid19_stats().items():
print(F'''{key}\n{value}\n''')
| 315 |
"""simple docstring"""
import math
import sys
def read_file_binary ( file_path ):
result = ""
try:
with open(file_path , "rb" ) as binary_file:
data = binary_file.read()
for dat in data:
curr_byte = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
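# LZW-style decoding of the bit string: emit the lexicon entry for each matched code and rebuild the lexicon whenever the entry count reaches a power of two.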
def decompress_data ( data_bits ):
lexicon = {"0": "0", "1": "1"}
result, curr_string = "", ""
index = len(lexicon )
for i in range(len(data_bits ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
last_match_id = lexicon[curr_string]
result += last_match_id
lexicon[curr_string] = last_match_id + "0"
if math.log2(index ).is_integer():
new_lex = {}
for curr_key in list(lexicon ):
new_lex["0" + curr_key] = lexicon.pop(curr_key )
lexicon = new_lex
lexicon[bin(index )[2:]] = last_match_id + "1"
index += 1
curr_string = ""
return result
def write_file_binary ( file_path , to_write ):
byte_length = 8
try:
with open(file_path , "wb" ) as opened_file:
result_byte_array = [
to_write[i : i + byte_length]
for i in range(0 , len(to_write ) , byte_length )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def remove_prefix ( data_bits ):
counter = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
data_bits = data_bits[counter:]
data_bits = data_bits[counter + 1 :]
return data_bits
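# End-to-end decompression: read the source file as a bit string, strip the size prefix, decode, and write the bytes out.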
def compress ( source_path , destination_path ):
data_bits = read_file_binary(source_path )
data_bits = remove_prefix(data_bits )
decompressed = decompress_data(data_bits )
write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 292 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class _lowerCamelCase ( lowercase_ ):
UpperCAmelCase_ = "dpr"
def __init__(self , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.02 , __a=1e-1_2 , __a=0 , __a="absolute" , __a = 0 , **__a , ) -> Union[str, Any]:
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = projection_dim
UpperCamelCase = position_embedding_type
| 153 |
"""simple docstring"""
class Node :
def __init__( self , name , val ):
self.name = name
self.val = val
def __str__( self ):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self , other ):
return self.val < other.val
class MinHeap :
def __init__( self , array ):
self.idx_of_element = {}
self.heap_dict = {}
self.heap = self.build_heap(array )
def __getitem__( self , key ):
return self.get_value(key )
def get_parent_idx( self , idx ):
return (idx - 1) // 2
def get_left_child_idx( self , idx ):
return idx * 2 + 1
def get_right_child_idx( self , idx ):
return idx * 2 + 2
def get_value( self , key ):
return self.heap_dict[key]
def build_heap( self , array ):
last_idx = len(array ) - 1
start_from = self.get_parent_idx(last_idx )
for idx, i in enumerate(array ):
self.idx_of_element[i] = idx
self.heap_dict[i.name] = i.val
for i in range(start_from , -1 , -1 ):
self.sift_down(i , array )
return array
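# sift_down restores the min-heap property: swap idx with its smaller child until neither child is smaller.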
def sift_down( self , idx , array ):
while True:
l = self.get_left_child_idx(idx )  # noqa: E741
r = self.get_right_child_idx(idx )
smallest = idx
if l < len(array ) and array[l] < array[idx]:
smallest = l
if r < len(array ) and array[r] < array[smallest]:
smallest = r
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
(
self.idx_of_element[array[idx]],
self.idx_of_element[array[smallest]],
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
idx = smallest
else:
break
def sift_up( self , idx ):
p = self.get_parent_idx(idx )
while p >= 0 and self.heap[p] > self.heap[idx]:
self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
idx = p
p = self.get_parent_idx(p )
def peek( self ):
return self.heap[0]
def remove( self ):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
x = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def insert( self , node ):
self.heap.append(node )
self.idx_of_element[node] = len(self.heap ) - 1
self.heap_dict[node.name] = node.val
self.sift_up(len(self.heap ) - 1 )
def is_empty( self ):
return len(self.heap ) == 0
def decrease_key( self , node , new_value ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less than the current value"
node.val = new_value
self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node] )
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__snake_case :List[Any] = logging.get_logger(__name__)
__snake_case :Tuple = TypeVar('''DatasetType''', Dataset, IterableDataset)
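# Interleaving entry point: verify that every input is a Dataset or every input is an IterableDataset, then dispatch to the matching map-style or iterable implementation.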
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCAmelCase ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.' )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase )
else:
return _interleave_iterable_datasets(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase )
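# Concatenation entry point: same validation as above, then dispatch; axis=0 appends rows, axis=1 adds columns.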
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n'
f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCAmelCase ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.' )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
else:
return _concatenate_iterable_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
| 49 |
"""simple docstring"""
from __future__ import annotations
solution : list = []
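# A square is safe when no queen already occupies its row, its column, or either diagonal running up from it.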
def is_safe ( board , row , column ):
for i in range(len(board ) ):
if board[row][i] == 1:
return False
for i in range(len(board ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
if board[i][j] == 1:
return False
return True
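# Backtracking search: place one queen per row; when every row is filled, record the board and print it.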
def solve ( board , row ):
if row >= len(board ):
solution.append(board )
printboard(board )
print()
return True
for i in range(len(board ) ):
if is_safe(board , row , i ):
board[row][i] = 1
solve(board , row + 1 )
board[row][i] = 0
return False
def printboard ( board ):
for i in range(len(board ) ):
for j in range(len(board ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 292 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A__ ( lowercase_ ):
"""simple docstring"""
__magic_name__ = 'vivit'
def __init__( self , __snake_case=2_2_4 , __snake_case=3_2 , __snake_case=[2, 1_6, 1_6] , __snake_case=3 , __snake_case=7_6_8 , __snake_case=1_2 , __snake_case=1_2 , __snake_case=3_0_7_2 , __snake_case="gelu_fast" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1E-06 , __snake_case=True , **__snake_case , ):
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = image_size
snake_case = num_frames
snake_case = tubelet_size
snake_case = num_channels
snake_case = qkv_bias
super().__init__(**__UpperCamelCase )
| 127 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
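# hashimage fingerprints a PIL image by hashing its raw pixel bytes, so outputs can be compared across test runs.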
def hashimage ( image ):
m = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowerCAmelCase__ ( lowercase_ ):
__a = 42
__a = 42
class lowerCAmelCase__ ( lowercase_ , lowercase_ ):
__a = 1
@register_to_config
def __init__( self : Tuple , _lowerCamelCase : int = 2000 , _lowerCamelCase : float = 0.1_5 , _lowerCamelCase : float = 0.0_1 , _lowerCamelCase : float = 1348.0 , _lowerCamelCase : float = 1e-5 , _lowerCamelCase : int = 1 , ):
# standard deviation of the initial noise distribution
_snake_case = sigma_max
# setable values
_snake_case = None
self.set_sigmas(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase ( self : Any , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : Optional[int] = None ):
return sample
def lowercase ( self : int , _lowerCamelCase : int , _lowerCamelCase : float = None , _lowerCamelCase : Union[str, torch.device] = None ):
_snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_snake_case = torch.linspace(1 , __UpperCamelCase , __UpperCamelCase , device=__UpperCamelCase )
def lowercase ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : float = None , _lowerCamelCase : float = None , _lowerCamelCase : float = None ):
_snake_case = sigma_min if sigma_min is not None else self.config.sigma_min
_snake_case = sigma_max if sigma_max is not None else self.config.sigma_max
_snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__UpperCamelCase , __UpperCamelCase )
_snake_case = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_snake_case = torch.exp(torch.linspace(math.log(__UpperCamelCase ) , math.log(__UpperCamelCase ) , __UpperCamelCase ) )
_snake_case = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowercase ( self : Optional[int] , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : Optional[torch.Generator] = None , _lowerCamelCase : bool = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_snake_case = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_snake_case = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_snake_case = timesteps.to(self.discrete_sigmas.device )
_snake_case = self.discrete_sigmas[timesteps].to(sample.device )
_snake_case = self.get_adjacent_sigma(__UpperCamelCase , __UpperCamelCase ).to(sample.device )
_snake_case = torch.zeros_like(__UpperCamelCase )
_snake_case = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_snake_case = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_snake_case = diffusion.unsqueeze(-1 )
_snake_case = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_snake_case = randn_tensor(
sample.shape , layout=sample.layout , generator=__UpperCamelCase , device=sample.device , dtype=sample.dtype )
_snake_case = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_snake_case = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__UpperCamelCase , prev_sample_mean=__UpperCamelCase )
def lowercase ( self : Any , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : Optional[torch.Generator] = None , _lowerCamelCase : bool = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_snake_case = randn_tensor(sample.shape , layout=sample.layout , generator=__UpperCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_snake_case = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_snake_case = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_snake_case = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_snake_case = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_snake_case = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_snake_case = step_size.unsqueeze(-1 )
_snake_case = sample + step_size * model_output
_snake_case = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCamelCase )
def lowercase ( self : Any , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_snake_case = timesteps.to(original_samples.device )
_snake_case = self.discrete_sigmas.to(original_samples.device )[timesteps]
_snake_case = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__UpperCamelCase ) * sigmas[:, None, None, None]
)
_snake_case = noise + original_samples
return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
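# Worked sketch of the predictor update in `step_pred` above (illustrative numbers,
# not the scheduler API): for the VE SDE, g_t**2 = sigma_t**2 - sigma_{t-1}**2, the
# drift is -g_t**2 * score, and x_{t-1} = x_t - drift + g_t * z with z ~ N(0, I).
sigma_t, sigma_prev = 2.0, 1.5
g_t = (sigma_t**2 - sigma_prev**2) ** 0.5
x = torch.zeros(4)
score = torch.ones(4)  # stand-in for model_output, the learned score grad_x log p_t(x)
prev_mean = x - (-g_t**2 * score)  # subtract the drift, as the code above does
prev_sample = prev_mean + g_t * torch.randn(4)  # add the diffusion noise term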
| 288 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCAmelCase :
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def lowerCamelCase ( self :Tuple ):
A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A = tf.concat([input_ids, eos_tensor] , axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ):
A = TFPegasusModel(config=__UpperCamelCase ).get_decoder()
A = inputs_dict["input_ids"]
A = input_ids[:1, :]
A = inputs_dict["attention_mask"][:1, :]
A = inputs_dict["head_mask"]
A = 1
# first forward pass
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
A, A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A = tf.concat([input_ids, next_tokens] , axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
if attention_mask is None:
        A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
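# Worked example of the decoder mask built above (comment sketch): position 0 holds
# the forced decoder start token and is always attended; later positions mask padding.
# With pad_token_id=0 and decoder_input_ids=[[2, 5, 0]] the helper yields
# decoder_attention_mask=[[1, 1, 0]].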
@require_tf
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :int ):
A = TFPegasusModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def lowerCamelCase ( self :Any ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self :Dict ):
A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self :str , **__UpperCamelCase :str ):
A = self.translate_src_text(**__UpperCamelCase )
assert self.expected_text == generated_words
def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ):
A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase ( self :Union[str, Any] ):
self._assert_generated_batch_equal_expected()
| 292 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : List[str] = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MobileViTFeatureExtractor']
__UpperCamelCase : List[str] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
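# Design note (sketch, not part of the upstream module): `_LazyModule` defers the
# heavy torch/tf imports until an attribute is first touched, so importing this
# package stays cheap. A hand-rolled equivalent would use PEP 562's module-level
# `__getattr__`, roughly:
# import importlib
# def __getattr__(name):
#     submodule = _name_to_submodule[name]  # hypothetical lookup built from _import_structure
#     return getattr(importlib.import_module(f".{submodule}", __name__), name)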
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            A = item.h2.text
            A = "https://www.amazon.in/" + item.h2.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
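# Worked example of the discount formula above (sketch with made-up prices): an MRP
# of ₹50,000 and a price of ₹40,000 give (50000 - 40000) / 50000 * 100 = 20.0% off.
def _discount_percent(mrp: float, price: float) -> float:
    """Sketch helper mirroring the inline computation in the loop above."""
    return (mrp - price) / mrp * 100
assert _discount_percent(50_000, 40_000) == 20.0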
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Any=2, lowerCamelCase : int=True, lowerCamelCase : Any=False, lowerCamelCase : Tuple=10, lowerCamelCase : List[str]=3, lowerCamelCase : Optional[Any]=32 * 4, lowerCamelCase : Any=32 * 6, lowerCamelCase : Union[str, Any]=4, lowerCamelCase : int=32, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = is_training
lowercase__ = use_auxiliary_loss
lowercase__ = num_queries
lowercase__ = num_channels
lowercase__ = min_size
lowercase__ = max_size
lowercase__ = num_labels
lowercase__ = mask_feature_size
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__UpperCamelCase )
lowercase__ = torch.ones([self.batch_size, self.min_size, self.max_size], device=__UpperCamelCase )
lowercase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=__UpperCamelCase ) > 0.5
).float()
lowercase__ = (torch.rand((self.batch_size, self.num_labels), device=__UpperCamelCase ) > 0.5).long()
lowercase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase__ ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = output.encoder_hidden_states
lowercase__ = output.pixel_decoder_hidden_states
lowercase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCamelCase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCamelCase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCamelCase ), config.decoder_config.decoder_layers )
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : int=False ):
'''simple docstring'''
with torch.no_grad():
lowercase__ = MaskFormerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(pixel_values=__UpperCamelCase, pixel_mask=__UpperCamelCase )
lowercase__ = model(__UpperCamelCase, output_hidden_states=__UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCamelCase, __UpperCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = MaskFormerForInstanceSegmentation(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
def comm_check_on_output(lowerCamelCase : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase__ = model(pixel_values=__UpperCamelCase, pixel_mask=__UpperCamelCase )
lowercase__ = model(__UpperCamelCase )
comm_check_on_output(__UpperCamelCase )
lowercase__ = model(
pixel_values=__UpperCamelCase, pixel_mask=__UpperCamelCase, mask_labels=__UpperCamelCase, class_labels=__UpperCamelCase )
comm_check_on_output(__UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _UpperCAmelCase ( lowercase_ ,lowercase_ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = MaskFormerModelTester(self )
lowercase__ = ConfigTester(self, config_class=__UpperCamelCase, has_text_modality=__UpperCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCamelCase, **__UpperCamelCase, output_hidden_states=__UpperCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCamelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase__ ( self : int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__UpperCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __UpperCamelCase )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase__ = MaskFormerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = (self.model_tester.min_size,) * 2
lowercase__ = {
'''pixel_values''': torch.randn((2, 3, *size), device=__UpperCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size), device=__UpperCamelCase ),
'''class_labels''': torch.zeros(2, 10, device=__UpperCamelCase ).long(),
}
lowercase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCamelCase )
lowercase__ = model(**__UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCamelCase, **__UpperCamelCase, output_hidden_states=__UpperCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__UpperCamelCase ).to(__UpperCamelCase )
lowercase__ = model(**__UpperCamelCase, output_attentions=__UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase__ = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
lowercase__ = model(__UpperCamelCase, mask_labels=__UpperCamelCase, class_labels=__UpperCamelCase ).loss
loss.backward()
def lowercase__ ( self : Tuple ):
'''simple docstring'''
# only MaskFormerForInstanceSegmentation has the loss
lowercase__ = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
lowercase__ = model(__UpperCamelCase, mask_labels=__UpperCamelCase, class_labels=__UpperCamelCase )
lowercase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowercase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A__ : Tuple = 1e-4
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : Any ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__UpperCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(__UpperCamelCase, return_tensors='''pt''' ).to(__UpperCamelCase )
lowercase__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCamelCase, (1, 3, 800, 1_088) )
with torch.no_grad():
lowercase__ = model(**__UpperCamelCase )
lowercase__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
lowercase__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
lowercase__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__UpperCamelCase )
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(__UpperCamelCase, return_tensors='''pt''' ).to(__UpperCamelCase )
lowercase__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCamelCase, (1, 3, 800, 1_088) )
with torch.no_grad():
lowercase__ = model(**__UpperCamelCase )
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
lowercase__ = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
lowercase__ = torch.tensor(__UpperCamelCase ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase__ = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__UpperCamelCase )
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(__UpperCamelCase, return_tensors='''pt''' ).to(__UpperCamelCase )
lowercase__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCamelCase, (1, 3, 800, 1_088) )
with torch.no_grad():
lowercase__ = model(**__UpperCamelCase )
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
lowercase__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
lowercase__ = torch.tensor(__UpperCamelCase ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __UpperCamelCase, atol=__UpperCamelCase ) )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__UpperCamelCase )
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )], return_tensors='''pt''', )
lowercase__ = inputs['''pixel_values'''].to(__UpperCamelCase )
lowercase__ = [el.to(__UpperCamelCase ) for el in inputs['''mask_labels''']]
lowercase__ = [el.to(__UpperCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase__ = model(**__UpperCamelCase )
self.assertTrue(outputs.loss is not None )
| 207 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def lowerCamelCase ( self :Tuple ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
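# Sketch of the classifier-free-guidance combine performed above (illustration only):
# noise_pred = uncond + scale * (text - uncond). A scale of 1.0 recovers the pure text
# branch; larger scales push the prediction further toward the prompt.
uncond = torch.tensor([0.0, 0.0])
text = torch.tensor([1.0, 2.0])
assert torch.equal(uncond + 1.0 * (text - uncond), text)
assert torch.equal(uncond + 7.5 * (text - uncond), 7.5 * text)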
| 292 | 0 |
import itertools
import os
import re
__snake_case = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
__snake_case = re.compile(r'''([a-z\d])([A-Z])''')
__snake_case = re.compile(r'''(?<!_)_(?!_)''')
__snake_case = re.compile(r'''(_{2,})''')
__snake_case = r'^\w+(\.\w+)*$'
__snake_case = r'<>:/\|?*'
def lowerCAmelCase_ ( __lowerCAmelCase )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =_uppercase_uppercase_re.sub(R'''\1_\2''' , __lowerCAmelCase )
UpperCAmelCase : int =_lowercase_uppercase_re.sub(R'''\1_\2''' , __lowerCAmelCase )
return name.lower()
def lowerCAmelCase_ ( __lowerCAmelCase )-> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] =_single_underscore_re.split(__lowerCAmelCase )
UpperCAmelCase : List[str] =[_multiple_underscores_re.split(__lowerCAmelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__lowerCAmelCase ) if n != '''''' )
def lowerCAmelCase_ ( __lowerCAmelCase )-> str:
'''simple docstring'''
if os.path.basename(__lowerCAmelCase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(__lowerCAmelCase )
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> Any:
'''simple docstring'''
if os.path.basename(__lowerCAmelCase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , __lowerCAmelCase ):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(__lowerCAmelCase )}-{split}'''
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None )-> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] =filename_prefix_for_split(__lowerCAmelCase , __lowerCAmelCase )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
UpperCAmelCase : int =os.path.join(__lowerCAmelCase , __lowerCAmelCase )
return f'''{filepath}*'''
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None )-> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =filename_prefix_for_split(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase : Optional[Any] =os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if shard_lengths:
UpperCAmelCase : Union[str, Any] =len(__lowerCAmelCase )
UpperCAmelCase : int =[f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(__lowerCAmelCase )]
if filetype_suffix:
UpperCAmelCase : Dict =[filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
UpperCAmelCase : int =prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
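# Worked examples for the helpers above (comment sketch, since the renamed defs
# shadow one another): camelcase_to_snakecase("SomeDatasetName") gives
# "some_dataset_name", and filepattern_for_dataset_split("squad", "train", "/data",
# filetype_suffix="arrow") gives "/data/squad-train.arrow*".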
| 348 |
"""simple docstring"""
_snake_case : Optional[int] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( lowercase_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__(self , lowerCAmelCase = 1 , lowerCAmelCase = 1_0_0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
if audio_length_in_s is None:
__lowercase= self.unet.config.sample_size / self.unet.config.sample_rate
__lowercase= audio_length_in_s * self.unet.config.sample_rate
__lowercase= 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
f' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
__lowercase= int(__UpperCamelCase )
if sample_size % down_scale_factor != 0:
__lowercase= (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
' process.' )
__lowercase= int(__UpperCamelCase )
__lowercase= next(iter(self.unet.parameters() ) ).dtype
__lowercase= (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__lowercase= randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
# set step values
self.scheduler.set_timesteps(__UpperCamelCase , device=audio.device )
__lowercase= self.scheduler.timesteps.to(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__lowercase= self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# 2. compute previous image: x_t -> t_t-1
__lowercase= self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
__lowercase= audio.clamp(-1 , 1 ).float().cpu().numpy()
__lowercase= audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__UpperCamelCase )
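# Worked sketch of the length rounding above (illustrative numbers only):
sample_rate = 16_000
down_scale_factor = 2 ** 4                 # e.g. a UNet with four up blocks
requested = int(3.0001 * sample_rate)      # 48001 samples, not a multiple of 16
padded = (requested // down_scale_factor + 1) * down_scale_factor
assert padded == 48_016                    # rounded up, then trimmed back after denoising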
| 295 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( UpperCamelCase ):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase ):
A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
A = s_dict.pop(UpperCamelCase )
def A__ ( UpperCamelCase ):
A, A = emb.weight.shape
A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A = emb.weight.data
return lin_layer
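# Sketch of the weight tying performed by the helper above (illustration only):
emb = nn.Embedding(10, 4)
vocab_size, emb_dim = emb.weight.shape
lm_head = nn.Linear(emb_dim, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # shares storage: the head is tied to the embedding
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()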
def A__ ( UpperCamelCase , UpperCamelCase ):
A = torch.load(UpperCamelCase , map_location="cpu" )
A = mam_aaa["args"]
A = mam_aaa["model"]
A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
A = state_dict["decoder.embed_tokens.weight"].shape[0]
A = args.share_decoder_input_output_embed
A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
A = SpeechaTextForConditionalGeneration(UpperCamelCase )
A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292 | 0 |
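The conversion script above ties the LM head to the decoder embedding matrix when `share_decoder_input_output_embed` is set; `make_linear_from_emb` is the helper doing the tying. A readable, runnable reconstruction of it:

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    # Bias-free projection whose weight is the embedding matrix itself,
    # so input and output embeddings share one parameter tensor.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer

emb = nn.Embedding(100, 16)
lm_head = make_linear_from_emb(emb)
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage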
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = IFInpaintingPipeline
_SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
_SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {"""latents"""}
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict=0 ):
if str(__UpperCamelCase ).startswith('mps' ):
lowerCAmelCase_ : List[str] = torch.manual_seed(__UpperCamelCase )
else:
lowerCAmelCase_ : Any = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowerCAmelCase_ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self : str ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 224 |
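The dummy-input helper above seeds its generator differently per device because `torch.Generator` has no MPS backend. The same pattern in isolation:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # On MPS there is no device-local Generator; fall back to the global CPU one.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
noise = torch.randn(1, 3, 32, 32, generator=gen)
print(noise.shape)  # torch.Size([1, 3, 32, 32])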
"""simple docstring"""
from math import isqrt, loga
def A__ ( UpperCamelCase ):
A = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
A = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ):
A = degree * loga(UpperCamelCase )
A = int(UpperCamelCase )
A = calculate_prime_numbers(UpperCamelCase )
A = 0
A = 0
A = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
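The prime helper above is a sieve of Eratosthenes: the inner loop marks every multiple of each surviving prime, starting at its square. A readable, runnable version of the same sieve:

from math import isqrt

def calculate_prime_numbers(max_number: int) -> list[int]:
    # Mark composites; everything still True at the end is prime.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i * i, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]

print(calculate_prime_numbers(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]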
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(A_, (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(A_ ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class UpperCAmelCase_ ( lowercase_ ):
'''simple docstring'''
a__ = ["""pixel_values"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : str , ) -> Tuple:
"""simple docstring"""
super().__init__(**__UpperCamelCase )
__magic_name__ = size if size is not None else {"""shortest_edge""": 256}
__magic_name__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
__magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__magic_name__ = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = do_center_crop
__magic_name__ = crop_size
__magic_name__ = resample
__magic_name__ = do_rescale
__magic_name__ = rescale_factor
__magic_name__ = offset
__magic_name__ = do_normalize
__magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> int:
"""simple docstring"""
__magic_name__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
__magic_name__ = get_resize_output_image_size(__UpperCamelCase , size["""shortest_edge"""] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
__magic_name__ = (size["""height"""], size["""width"""])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> List[Any]:
"""simple docstring"""
__magic_name__ = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = image.astype(np.floataa )
if offset:
__magic_name__ = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ) -> Optional[Any]:
"""simple docstring"""
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> int:
"""simple docstring"""
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
__magic_name__ = to_numpy_array(__UpperCamelCase )
if do_resize:
__magic_name__ = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
__magic_name__ = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
__magic_name__ = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
__magic_name__ = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
__magic_name__ = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def _lowercase ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Tuple , ) -> int:
"""simple docstring"""
__magic_name__ = do_resize if do_resize is not None else self.do_resize
__magic_name__ = resample if resample is not None else self.resample
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ = offset if offset is not None else self.offset
__magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ = image_mean if image_mean is not None else self.image_mean
__magic_name__ = image_std if image_std is not None else self.image_std
__magic_name__ = size if size is not None else self.size
__magic_name__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
__magic_name__ = crop_size if crop_size is not None else self.crop_size
__magic_name__ = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
__magic_name__ = make_batched(__UpperCamelCase )
__magic_name__ = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
__magic_name__ = {"""pixel_values""": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 88 |
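The `offset` flag in the rescale step above is meant to put pixel values in a signed range instead of [0, 1]. A hedged numpy sketch of one common convention (uint8 in [0, 255] mapped to [-1, 1] via scale 1/127.5); the exact offset arithmetic in the processor above may differ:

import numpy as np

def rescale_signed(image: np.ndarray, scale: float = 1 / 127.5, offset: bool = True) -> np.ndarray:
    # scale = 1/127.5 with offset maps uint8 pixels [0, 255] to roughly [-1, 1].
    rescaled = image.astype(np.float32) * scale
    if offset:
        rescaled = rescaled - 1.0
    return rescaled

frame = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
out = rescale_signed(frame)
print(out.min() >= -1.0, out.max() <= 1.0)  # True True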
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : Union[str, Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
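The import file above uses the lazy-module pattern: `_import_structure` declares which submodule owns each public name, and the name is only resolved when first accessed. A minimal sketch of the same idea with a PEP 562 module-level `__getattr__` (assumes it lives in a package's `__init__.py`; the submodule and class names are illustrative):

import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}

# Reverse map: public name -> submodule that defines it.
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    # Import the owning submodule on first access instead of at package import time.
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")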
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model'}
a = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
a = {'bert_for_seq_generation': 512}
class lowercase_ ( lowercase_ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Optional[int]="<pad>" , _UpperCAmelCase : Tuple="<::::>" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Optional[Any] , ):
_A = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
@property
def lowerCAmelCase_ ( self : Tuple ):
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Optional[int] , _UpperCAmelCase : Dict ):
_A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
return self.sp_model.piece_to_id(__UpperCamelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] ):
_A = self.sp_model.IdToPiece(__UpperCamelCase )
return token
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int ):
_A = []
_A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCamelCase ) + token
_A = []
else:
current_sub_tokens.append(__UpperCamelCase )
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 315 |
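The `__getstate__`/`__setstate__` pair above exists because a loaded `SentencePieceProcessor` is not picklable: the tokenizer drops it before pickling and rebuilds it from the vocab file afterwards. A stripped-down sketch of that pattern (the wrapper class name is illustrative):

import sentencepiece as spm

class SpmWrapper:
    def __init__(self, vocab_file, sp_model_kwargs=None):
        self.vocab_file = vocab_file
        self.sp_model_kwargs = sp_model_kwargs or {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    def __getstate__(self):
        # Drop the C++-backed processor; it cannot be pickled.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Rebuild the processor from the saved vocab file path.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)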
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the numbers of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
| 292 | 0 |
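The `inputs`/`outputs` properties above are what the ONNX exporter consumes: a mapping from tensor name to the axes that may vary at runtime. The equivalent `dynamic_axes` dict handed to `torch.onnx.export` looks like this (the export call is shown as a comment; model and tensor names are illustrative):

# Batch and sequence lengths vary at runtime for a seq2seq export.
dynamic_axes = {
    "input_ids": {0: "batch", 1: "encoder_sequence"},
    "attention_mask": {0: "batch", 1: "encoder_sequence"},
    "decoder_input_ids": {0: "batch", 1: "decoder_sequence"},
    "decoder_attention_mask": {0: "batch", 1: "decoder_sequence"},
}
# torch.onnx.export(model, (input_ids, attention_mask, decoder_input_ids, decoder_attention_mask),
#                   "model.onnx", input_names=list(dynamic_axes), dynamic_axes=dynamic_axes)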
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase , UpperCamelCase = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
UpperCamelCase = controlnet_params
UpperCamelCase = "bird"
UpperCamelCase = jax.device_count()
UpperCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
UpperCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
UpperCamelCase = jax.random.PRNGKey(0 )
UpperCamelCase = jax.random.split(__UpperCamelCase , jax.device_count() )
UpperCamelCase = replicate(__UpperCamelCase )
UpperCamelCase = shard(__UpperCamelCase )
UpperCamelCase = shard(__UpperCamelCase )
UpperCamelCase = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> List[Any]:
UpperCamelCase , UpperCamelCase = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
UpperCamelCase = controlnet_params
UpperCamelCase = "Chef in the kitchen"
UpperCamelCase = jax.device_count()
UpperCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
UpperCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
UpperCamelCase = jax.random.PRNGKey(0 )
UpperCamelCase = jax.random.split(__UpperCamelCase , jax.device_count() )
UpperCamelCase = replicate(__UpperCamelCase )
UpperCamelCase = shard(__UpperCamelCase )
UpperCamelCase = shard(__UpperCamelCase )
UpperCamelCase = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
UpperCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1]
UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 153 |
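The Flax tests above follow the standard pmap data-parallel recipe: split the PRNG key once per device, replicate the parameters, and shard the batch along a new leading device axis. The recipe in isolation (runnable on any host; `params` and `batch` are toy values):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
rng = jax.random.PRNGKey(0)
rngs = jax.random.split(rng, num_devices)   # one independent key per device

params = {"w": jnp.ones((4, 4))}
params = replicate(params)                  # copy the pytree onto every device

batch = jnp.zeros((num_devices * 2, 8))     # global batch must divide by device count
batch = shard(batch)                        # leading axis -> (num_devices, per_device, ...)
print(batch.shape)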
"""simple docstring"""
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) connects a vertex from U to a vertex in V or a
# vertex from V to a vertex in U. In other words, for every edge (u, v), either u belongs
# to U and v to V, or u belongs to V and v to U. Equivalently, no edge joins two
# vertices of the same set.
def A__ ( UpperCamelCase ):
A = [False] * len(UpperCamelCase )
A = [-1] * len(UpperCamelCase )
def dfs(UpperCamelCase , UpperCamelCase ):
A = True
A = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase , 1 - c )
for i in range(len(UpperCamelCase ) ):
if not visited[i]:
dfs(UpperCamelCase , 0 )
for i in range(len(UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 292 | 0 |
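The DFS two-coloring above can equivalently be written iteratively with BFS, failing as soon as an edge joins two same-colored vertices (an odd cycle). A runnable sketch:

from collections import deque

def is_bipartite(graph: dict[int, list[int]]) -> bool:
    color: dict[int, int] = {}
    for start in graph:                      # handle disconnected components
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]  # alternate colors along each edge
                    queue.append(u)
                elif color[u] == color[v]:
                    return False             # odd cycle: not bipartite
    return True

print(is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True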
import heapq
import sys
import numpy as np
__snake_case :Tuple = tuple[int, int]
class _A :
def __init__( self : List[str]):
'''simple docstring'''
__a = []
__a = set()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return len(self.elements) == 0
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(__UpperCamelCase)
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((__a) , (__a)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
if item in self.set:
self.set.remove(__UpperCamelCase)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((__a) , (__a)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return self.elements[0][1]
def _lowerCamelCase ( self : str):
'''simple docstring'''
((__a) , (__a)) = heapq.heappop(self.elements)
self.set.remove(__UpperCamelCase)
return (priority, item)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
# euclidean distance
__a = np.array(_UpperCAmelCase )
__a = np.array(_UpperCAmelCase )
return np.linalg.norm(a - b )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
# integer division by time variable
return consistent_heuristic(_UpperCAmelCase , _UpperCAmelCase ) // t
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = g_function[start] + Wa * heuristics[i](_UpperCAmelCase , _UpperCAmelCase )
return ans
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = np.chararray((n, n) )
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__a = '''*'''
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (j, (n - 1) - i) in blocks:
__a = '''#'''
__a = '''-'''
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = '''-'''
__a = back_pointer[x]
__a = '''-'''
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__a = back_pointer[goal]
while x != start:
print(_UpperCAmelCase , end=''' ''' )
__a = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def __snake_case ( _UpperCAmelCase ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
for itera in range(_UpperCAmelCase ):
open_list[itera].remove_element(_UpperCAmelCase )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCAmelCase )
__a = -1
__a = float('''inf''' )
if valid(_UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCAmelCase , key(_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCAmelCase ):
if key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) <= Wa * key(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ):
open_list[j].put(
_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
def __snake_case ( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__snake_case :Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__snake_case :List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__snake_case :int = make_common_ground()
__snake_case :str = blocks_blk
# hyper parameters
__snake_case :Tuple = 1
__snake_case :str = 1
__snake_case :List[str] = 20
__snake_case :Any = 3 # one consistent and two other inconsistent
# start and end destination
__snake_case :Tuple = (0, 0)
__snake_case :Tuple = (n - 1, n - 1)
__snake_case :str = 1
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {start: 0, goal: float('''inf''' )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(_UpperCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
__a = []
__a = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , _UpperCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__a , __a = open_list[i].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_inad.append(_UpperCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__a = open_list[0].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_anchor.append(_UpperCAmelCase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCAmelCase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 49 |
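The multi-heuristic search above runs one anchor queue plus several inadmissible queues over a shared g-function. Its single-queue special case is ordinary A*; a compact sketch on the same grid conventions (4-connected moves, unit edge cost, Manhattan heuristic):

import heapq

def a_star(start, goal, blocks, n):
    def h(p):
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])  # Manhattan distance

    g = {start: 0}
    open_heap = [(h(start), start)]
    back = {}
    while open_heap:
        _, s = heapq.heappop(open_heap)
        if s == goal:                      # reconstruct the path via back-pointers
            path = [s]
            while s != start:
                s = back[s]
                path.append(s)
            return path[::-1]
        x, y = s
        for nb in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nb[0] < n and 0 <= nb[1] < n and nb not in blocks:
                if g[s] + 1 < g.get(nb, float("inf")):
                    g[nb] = g[s] + 1
                    back[nb] = s
                    heapq.heappush(open_heap, (g[nb] + h(nb), nb))
    return None

print(a_star((0, 0), (4, 4), set(), 5))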
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ):
A = 1.0 if scale is None else scale
A = 0.0 if loc is None else loc
super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] )
@property
def lowerCamelCase ( self :Any ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCamelCase ( self :Optional[int] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCamelCase ( self :Dict ):
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ):
super().__init__(**__UpperCamelCase )
A = args_dim
A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] )
A = domain_map
def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ):
A = [proj(__UpperCamelCase ) for proj in self.proj]
return self.domain_map(*__UpperCamelCase )
class _UpperCAmelCase ( nn.Module ):
def __init__( self :Dict , __UpperCamelCase :int ):
super().__init__()
A = function
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ):
return self.function(__UpperCamelCase , *__UpperCamelCase )
class _UpperCAmelCase :
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :Any , __UpperCamelCase :int = 1 ):
A = dim
A = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ):
if self.dim == 1:
return self.distribution_class(*__UpperCamelCase )
else:
return Independent(self.distribution_class(*__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ):
A = self._base_distribution(__UpperCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim )
@property
def lowerCamelCase ( self :List[Any] ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCamelCase ( self :Tuple ):
return len(self.event_shape )
@property
def lowerCamelCase ( self :int ):
return 0.0
def lowerCamelCase ( self :str , __UpperCamelCase :int ):
return ParameterProjection(
in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCamelCase ( __UpperCamelCase :torch.Tensor ):
return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase = StudentT
@classmethod
def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
A = 2.0 + cls.squareplus(__UpperCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"loc": 1, "scale": 1}
UpperCamelCase = Normal
@classmethod
def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = {"total_count": 1, "logits": 1}
UpperCamelCase = NegativeBinomial
@classmethod
def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ):
A = cls.squareplus(__UpperCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ):
A, A = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase )
else:
return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ):
A, A = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 292 | 0 |
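squareplus(x) = (x + sqrt(x^2 + 4)) / 2 above is a smooth bijection onto the positive reals, used to keep scale and degrees-of-freedom parameters strictly positive. A tiny runnable sketch of the map and the affine loc/scale wrapping that AffineTransformed performs:

import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # Smooth, strictly positive alternative to softplus.
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

raw_scale = torch.tensor([-2.0, 0.0, 3.0])
scale = squareplus(raw_scale)                 # always > 0
base = Normal(torch.zeros(3), scale)
# Shift and rescale the base distribution, as AffineTransformed does above.
dist = TransformedDistribution(base, [AffineTransform(loc=1.0, scale=0.5)])
print(dist.sample().shape)                    # torch.Size([3])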
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=lowercase_ ):
"""simple docstring"""
__magic_name__ = ['torch', 'torchsde']
def __init__( self , *__snake_case , **__snake_case ):
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 127 |
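The placeholder class above fails loudly at construction time when optional backends are absent, instead of at import time. A minimal sketch of the guard (`_is_available` is a hypothetical stand-in for the real availability probe, and the class name is a dummy):

import importlib.util

def _is_available(backend: str) -> bool:
    return importlib.util.find_spec(backend) is not None

def requires_backends(obj, backends):
    missing = [b for b in backends if not _is_available(b)]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")

class DummyScheduler:
    # Stand-in mirroring the pattern above: construction checks the backends.
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])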
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
UpperCamelCase = None
def lowerCamelCase ( self :List[Any] ):
A = self.feature_extraction_class(**self.feat_extract_dict )
A = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Tuple ):
A = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
| 292 | 0 |
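The mixin above asserts that a feature extractor survives a JSON round trip unchanged. The same invariant in a few standalone lines:

import json
import os
import tempfile

config = {"sampling_rate": 16000, "feature_size": 80}
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "feat_extract.json")
    with open(path, "w") as f:
        json.dump(config, f)
    with open(path) as f:
        reloaded = json.load(f)
assert reloaded == config  # save -> load must be lossless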
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
UpperCAmelCase__ = None
UpperCAmelCase__ = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
UpperCAmelCase__ = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowerCAmelCase__ :
__a = True
__a = None
# Automatically constructed
__a = """PIL.Image.Image"""
__a = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
__a = field(default="""Image""" , init=lowercase_ , repr=lowercase_ )
def __call__( self : Dict ):
return self.pa_type
def lowercase ( self : Optional[Any] , _lowerCamelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_snake_case = np.array(__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
return {"path": value, "bytes": None}
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
return {"path": None, "bytes": value}
elif isinstance(__UpperCamelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__UpperCamelCase )
elif isinstance(__UpperCamelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__UpperCamelCase )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowercase ( self : Dict , _lowerCamelCase : dict , _lowerCamelCase : List[str]=None ):
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
_snake_case = {}
_snake_case , _snake_case = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(__UpperCamelCase ):
_snake_case = PIL.Image.open(__UpperCamelCase )
else:
_snake_case = path.split('''::''' )[-1]
try:
_snake_case = string_to_dict(__UpperCamelCase , config.HUB_DATASETS_URL )['''repo_id''']
_snake_case = token_per_repo_id.get(__UpperCamelCase )
except ValueError:
_snake_case = None
with xopen(__UpperCamelCase , '''rb''' , use_auth_token=__UpperCamelCase ) as f:
_snake_case = BytesIO(f.read() )
_snake_case = PIL.Image.open(bytes_ )
else:
_snake_case = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase ( self : Optional[int] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def lowercase ( self : Optional[Any] , _lowerCamelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_snake_case = pa.array([None] * len(__UpperCamelCase ) , type=pa.binary() )
_snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_snake_case = pa.array([None] * len(__UpperCamelCase ) , type=pa.string() )
_snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
_snake_case = storage.field('''bytes''' )
else:
_snake_case = pa.array([None] * len(__UpperCamelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
_snake_case = storage.field('''path''' )
else:
_snake_case = pa.array([None] * len(__UpperCamelCase ) , type=pa.string() )
_snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_snake_case = pa.array(
[encode_np_array(np.array(__UpperCamelCase ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_snake_case = pa.array([None] * len(__UpperCamelCase ) , type=pa.string() )
_snake_case = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__UpperCamelCase , self.pa_type )
def lowercase ( self : str , _lowerCamelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_lowerCamelCase : List[str] ):
with xopen(__UpperCamelCase , '''rb''' ) as f:
_snake_case = f.read()
return bytes_
_snake_case = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_snake_case = pa.array(
[os.path.basename(__UpperCamelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
_snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__UpperCamelCase , self.pa_type )
def _UpperCAmelCase ( ) -> str:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_snake_case = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _UpperCAmelCase ( __lowerCamelCase : Any ) -> Union[str, Any]:
_snake_case = BytesIO()
if image.format in list_image_compression_formats():
_snake_case = image.format
else:
_snake_case = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(__lowerCamelCase , format=__lowerCamelCase )
return buffer.getvalue()
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[int]:
if hasattr(__lowerCamelCase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def _UpperCAmelCase ( __lowerCamelCase : Any ) -> List[Any]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
_snake_case = array.dtype
_snake_case = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
_snake_case = dtype.kind
_snake_case = dtype.itemsize
_snake_case = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_snake_case = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_snake_case = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_snake_case = dtype_byteorder + dtype_kind + str(__lowerCamelCase )
_snake_case = np.dtype(__lowerCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
_snake_case = PIL.Image.fromarray(array.astype(__lowerCamelCase ) )
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def objects_to_list_of_image_dicts( objs : list ) -> list:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
_ , obj = first_non_null_value(objs )
if isinstance(obj , str ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(obj , np.ndarray ):
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
return [obj_to_image_dict_func(obj ) for obj in objs]
elif isinstance(obj , PIL.Image.Image ):
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
return [obj_to_image_dict_func(obj ) for obj in objs]
else:
return objs
else:
return objs
| 288 |
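The dtype-downcasting loop above is easy to test in isolation. Below is a minimal standalone sketch of the same idea, assuming an illustrative `VALID_DTYPES` list in place of the library's `_VALID_IMAGE_ARRAY_DTPYES` constant, whose exact contents are not shown in this row:

```python
import numpy as np

# Illustrative stand-in for the library's valid-dtype list (assumption).
VALID_DTYPES = [np.dtype("|u1"), np.dtype("<u2"), np.dtype("<i2"), np.dtype("<i4"), np.dtype("<f4")]

def downcast_for_pillow(dtype: np.dtype) -> np.dtype:
    # Halve the itemsize within the same dtype kind until a valid dtype is
    # found, mirroring the manual loop in encode_np_array above.
    byteorder = dtype.byteorder if dtype.byteorder != "=" else "<"  # assumes a little-endian host
    itemsize = dtype.itemsize
    while itemsize >= 1:
        candidate = np.dtype(byteorder + dtype.kind + str(itemsize))
        if candidate in VALID_DTYPES:
            return candidate
        itemsize //= 2
    raise TypeError(f"Cannot convert dtype {dtype} to a valid image dtype.")

print(downcast_for_pillow(np.dtype("int64")))  # int32 under these assumptions
```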
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = RoFormerTokenizer
rust_tokenizer_class = RoFormerTokenizerFast
space_between_special_tokens = True
test_rust_tokenizer = True
def setUp( self ):
super().setUp()
def get_tokenizer( self , **kwargs ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **kwargs )
def get_rust_tokenizer( self , **kwargs ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **kwargs )
def get_chinese_input_output_texts( self ):
input_text = "永和服装饰品有限公司,今天天气非常好"
output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def test_tokenizer( self ):
tokenizer = self.get_tokenizer()
input_text, output_text = self.get_chinese_input_output_texts()
tokens = tokenizer.tokenize(input_text )
self.assertListEqual(tokens , output_text.split() )
input_tokens = tokens + [tokenizer.unk_token]
exp_tokens = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def test_rust_tokenizer( self ):
tokenizer = self.get_rust_tokenizer()
input_text, output_text = self.get_chinese_input_output_texts()
tokens = tokenizer.tokenize(input_text )
self.assertListEqual(tokens , output_text.split() )
input_tokens = tokens + [tokenizer.unk_token]
exp_tokens = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Tuple ):
pass
def lowerCamelCase ( self :List[str] ):
pass
| 292 | 0 |
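As a quick smoke test of the tokenizer exercised above (hypothetical usage; it needs the `rjieba` package and network access to the checkpoint named in the test):

```python
from transformers import RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
tokens = tokenizer.tokenize("永和服装饰品有限公司,今天天气非常好")
print(tokens)  # expected to match the space-separated output_text above
```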
def print_pascal_triangle( num_rows ):
triangle = generate_pascal_triangle(num_rows )
for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=''' ''' )
else:
print(triangle[row_idx][col_idx] , end='''''' )
print()
def generate_pascal_triangle( num_rows ):
if not isinstance(num_rows , int ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
triangle = []
for current_row_idx in range(num_rows ):
current_row = populate_current_row(triangle , current_row_idx )
triangle.append(current_row )
return triangle
def populate_current_row( triangle , current_row_idx ):
current_row = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
current_row[0] , current_row[-1] = 1, 1
for current_col_idx in range(1 , current_row_idx ):
calculate_current_element(
triangle , current_row , current_row_idx , current_col_idx )
return current_row
def calculate_current_element( triangle , current_row , current_row_idx , current_col_idx , ):
above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized( num_rows ):
if not isinstance(num_rows , int ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
result = [[1]]
for row_index in range(1 , num_rows ):
temp_row = [0] + result[-1] + [0]
row_length = row_index + 1
# Calculate the number of distinct elements in a row
distinct_elements = sum(divmod(row_length , 2 ) )
row_first_half = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
row_second_half = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
row = row_first_half + row_second_half
result.append(row )
return result
def benchmark( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func , value ) -> None:
call = f"""{func.__name__}({value})"""
timing = timeit(f"""__main__.{call}""" , setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 182 |
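A quick cross-check of the two generators restored above; both should agree row by row:

```python
rows = generate_pascal_triangle(5)
rows_fast = generate_pascal_triangle_optimized(5)
assert rows == rows_fast == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
print_pascal_triangle(5)  # centered rendering of the same five rows
```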
"""simple docstring"""
def snake_to_camel_case( input_str , use_pascal = False ):
if not isinstance(input_str , str ):
msg = F"Expected string as input, found {type(input_str )}"
raise ValueError(msg )
if not isinstance(use_pascal , bool ):
msg = F"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
raise ValueError(msg )
words = input_str.split("_" )
start_index = 0 if use_pascal else 1
words_to_capitalize = words[start_index:]
capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
initial_word = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 292 | 0 |
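Example calls for the converter above:

```python
print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString
```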
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : int, unet : Dict, scheduler : Tuple ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
scheduler = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=unet, scheduler=scheduler )
@torch.no_grad()
def __call__( self : Tuple, batch_size : int = 1, generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta : float = 0.0, num_inference_steps : int = 50, use_clipped_model_output : Optional[bool] = None, output_type : Optional[str] = "pil", return_dict : bool = True, ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size, int ):
image_shape = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(generator, list ) and len(generator ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
model_output = self.unet(image, t ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(
model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator ).prev_sample
image = (image / 2 + 0.5).clamp(0, 1 )
image = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
| 207 |
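Hypothetical end-to-end usage of the unconditional DDIM loop above, via the released `diffusers` API and a public checkpoint (downloads weights on first run):

```python
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")
```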
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline( DiffusionPipeline ):
def __init__( self :Any , unet :UNet2DConditionModel , scheduler :DDPMScheduler , movq :VQModel , ):
super().__init__()
self.register_modules(
unet=unet , scheduler=scheduler , movq=movq , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
if latents is None:
latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
latents = latents.to(device )
latents = latents * scheduler.init_noise_sigma
return latents
def enable_sequential_cpu_offload( self , gpu_id=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
device = torch.device(f"cuda:{gpu_id}" )
models = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
def enable_model_cpu_offload( self , gpu_id=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
device = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=False )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.unet, self.movq]:
_ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self :List[Any] , image_embeds :Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds :Union[torch.FloatTensor, List[torch.FloatTensor]] , hint :torch.FloatTensor , height :int = 5_12 , width :int = 5_12 , num_inference_steps :int = 1_00 , guidance_scale :float = 4.0 , num_images_per_prompt :int = 1 , generator :Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents :Optional[torch.FloatTensor] = None , output_type :Optional[str] = "pil" , return_dict :bool = True , ):
device = self._execution_device
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(image_embeds , list ):
image_embeds = torch.cat(image_embeds , dim=0 )
if isinstance(negative_image_embeds , list ):
negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
if isinstance(hint , list ):
hint = torch.cat(hint , dim=0 )
batch_size = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
self.scheduler.set_timesteps(num_inference_steps , device=device )
timesteps_tensor = self.scheduler.timesteps
num_channels_latents = self.movq.config.latent_channels
height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
# create initial latent
latents = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
noise_pred = self.unet(
sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
if do_classifier_free_guidance:
noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
_ , variance_pred_text = variance_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred , t , latents , generator=generator , )[0]
# post-processing
image = self.movq.decode(latents , force_not_quantize=True )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
| 292 | 0 |
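The guidance arithmetic inside the denoising loop above can be isolated into a few lines. A standalone sketch with illustrative tensor sizes:

```python
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The unconditional and conditional predictions were produced in one
    # batched forward pass; split them and recombine.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

batched = torch.randn(2, 4, 64, 64)  # [uncond, cond] stacked on the batch dim
print(apply_cfg(batched, guidance_scale=4.0).shape)  # torch.Size([1, 4, 64, 64])
```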
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
def forward( self , input_ids , token_type_ids , attention_mask ) -> Any:
'''simple docstring'''
return None
class FuncNonContiguousArgs:
def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ) -> Optional[int]:
'''simple docstring'''
return None
class OnnxExportTestCase( unittest.TestCase ):
MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , '''tf''' , 12 , **__UpperCamelCase )
@require_torch
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , '''pt''' , 12 , **__UpperCamelCase )
@require_torch
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
from transformers import BertModel
UpperCAmelCase : Union[str, Any] =['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(__UpperCamelCase ) )
vocab_file.flush()
UpperCAmelCase : Optional[int] =BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCAmelCase : List[str] =BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase , '''pt''' , 12 , __UpperCamelCase )
@require_tf
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCAmelCase : Tuple =self._test_export(__UpperCamelCase , '''tf''' , 12 , **__UpperCamelCase )
UpperCAmelCase : Any =quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCAmelCase : Optional[int] =self._test_export(__UpperCamelCase , '''pt''' , 12 , **__UpperCamelCase )
UpperCAmelCase : Dict =quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ) -> List[Any]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCAmelCase : List[Any] =Path(__UpperCamelCase ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import BertModel
UpperCAmelCase : Optional[Any] =BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCAmelCase : int =BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
UpperCAmelCase : int =TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCAmelCase : Dict =BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , '''tf''' )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =FeatureExtractionPipeline(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase : Union[str, Any] =['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple =infer_shapes(__UpperCamelCase , __UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] , __UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCAmelCase : str ={'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCAmelCase , UpperCAmelCase : List[str] =ensure_valid_input(FuncContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) , set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCAmelCase , UpperCAmelCase : List[str] =ensure_valid_input(FuncNonContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) , 1 )
self.assertEqual(len(__UpperCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 348 |
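A minimal sketch of what the export and quantization tests above exercise. The argument order follows the positional call inside `_test_export`; treat the exact `convert` signature and the output path as assumptions:

```python
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

onnx_path = Path("onnx/bert-base-cased.onnx")  # illustrative; the parent folder should not already exist
convert("pt", "bert-base-cased", onnx_path, 12, None)  # framework, model, output, opset, tokenizer
quantized_path = quantize(onnx_path)  # per the test above, writes a smaller model next to the input
print(quantized_path)
```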
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , pixel_values , labels ):
model = ViTMSNModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
config.num_labels = self.type_sequence_label_size
model = ViTMSNForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print(f"Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
config.num_channels = 1
model = ViTMSNForImageClassification(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
self.model_tester = ViTMSNModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
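Worked example of the sequence-length bookkeeping in the tester above: a ViT-style encoder turns the image into a grid of patches and prepends one [CLS] token.

```python
image_size, patch_size = 30, 2          # tester defaults above
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1            # +1 for the [CLS] token
print(num_patches, seq_length)          # 225 226
```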
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=1_6 , lowerCAmelCase=3_6 , lowerCAmelCase=6 , lowerCAmelCase=6 , lowerCAmelCase=6 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= AlbertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
__lowercase= model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
__lowercase= model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= AlbertForPreTraining(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , sentence_order_label=__UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= AlbertForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= AlbertForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= AlbertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= AlbertForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_choices
__lowercase= AlbertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : int =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[Any] =True
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
__lowercase= torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCamelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def _A (self ):
__lowercase= AlbertModelTester(self )
__lowercase= ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase= type
self.model_tester.create_and_check_model(*__UpperCamelCase )
@slow
def _A (self ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= AlbertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= AlbertModel.from_pretrained('albert-base-v2' )
__lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase= model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
__lowercase= torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __UpperCamelCase )
__lowercase= torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) )
| 295 |
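Standalone sketch of the multiple-choice input expansion used in `create_and_check_for_multiple_choice` above: each `(batch, seq)` tensor is tiled to `(batch, num_choices, seq)`.

```python
import torch

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = torch.randint(0, 99, (batch_size, seq_length))
multiple_choice_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(multiple_choice_ids.shape)  # torch.Size([2, 4, 7])
```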
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
model_type = '''vivit'''
def __init__( self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
super().__init__(**kwargs )
| 292 | 0 |
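Illustrative arithmetic for the defaults above: video tokens come from non-overlapping (2, 16, 16) tubelets over a 32-frame 224x224 clip. The token-count formula is assumed from the ViViT paper; it is not computed in this config class.

```python
num_frames, image_size = 32, 224
t, h, w = 2, 16, 16  # tubelet_size
num_tokens = (num_frames // t) * (image_size // h) * (image_size // w)
print(num_tokens)  # 16 * 14 * 14 = 3136
```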
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """marian"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any=5_8_1_0_1 , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : str=4_0_9_6 , SCREAMING_SNAKE_CASE_ : int=1_6 , SCREAMING_SNAKE_CASE_ : int=1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_6 , SCREAMING_SNAKE_CASE_ : Dict=0.0 , SCREAMING_SNAKE_CASE_ : Dict=0.0 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : Any=1_0_2_4 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=5_8_1_0_0 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[int]=5_8_1_0_0 , SCREAMING_SNAKE_CASE_ : List[Any]=0 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Dict=True , **SCREAMING_SNAKE_CASE_ : Tuple , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : int = decoder_vocab_size or vocab_size
lowerCAmelCase_ : Dict = max_position_embeddings
lowerCAmelCase_ : Optional[int] = d_model
lowerCAmelCase_ : int = encoder_ffn_dim
lowerCAmelCase_ : Any = encoder_layers
lowerCAmelCase_ : Optional[Any] = encoder_attention_heads
lowerCAmelCase_ : Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ : List[Any] = decoder_layers
lowerCAmelCase_ : Union[str, Any] = decoder_attention_heads
lowerCAmelCase_ : List[str] = dropout
lowerCAmelCase_ : Optional[int] = attention_dropout
lowerCAmelCase_ : Optional[int] = activation_dropout
lowerCAmelCase_ : Any = activation_function
lowerCAmelCase_ : Tuple = init_std
lowerCAmelCase_ : Optional[Any] = encoder_layerdrop
lowerCAmelCase_ : Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : List[str] = encoder_layers
lowerCAmelCase_ : str = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowerCAmelCase_ : Tuple = {0: 'batch'}
lowerCAmelCase_ : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowerCAmelCase_ : str = {0: 'batch', 1: 'decoder_sequence'}
lowerCAmelCase_ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase_ : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowerCAmelCase_ ,lowerCAmelCase_ : str = self.num_layers
for i in range(__UpperCamelCase ):
lowerCAmelCase_ : Optional[int] = {0: 'batch', 2: 'past_sequence + sequence'}
lowerCAmelCase_ : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
else:
lowerCAmelCase_ : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[Any] = super().outputs
else:
lowerCAmelCase_ : Union[str, Any] = super(__UpperCamelCase , self ).outputs
if self.use_past:
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = self.num_layers
for i in range(__UpperCamelCase ):
lowerCAmelCase_ : int = {0: 'batch', 2: 'past_sequence + sequence'}
lowerCAmelCase_ : int = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
lowerCAmelCase_ : Union[str, Any] = seq_length if not self.use_past else 1
lowerCAmelCase_ : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : List[Any] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase_ : Any = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = common_inputs['input_ids'].shape
lowerCAmelCase_ : List[str] = common_inputs['decoder_input_ids'].shape[1]
lowerCAmelCase_ ,lowerCAmelCase_ : Any = self.num_attention_heads
lowerCAmelCase_ : Tuple = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Union[str, Any] = decoder_seq_length + 3
lowerCAmelCase_ : str = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase_ : int = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
lowerCAmelCase_ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = self.num_layers
lowerCAmelCase_ : str = min(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : Dict = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
lowerCAmelCase_ : Optional[int] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
lowerCAmelCase_ : Optional[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , ):
lowerCAmelCase_ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Union[str, Any] = seqlen + 2
lowerCAmelCase_ ,lowerCAmelCase_ : int = self.num_layers
lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = self.num_attention_heads
lowerCAmelCase_ : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Tuple = common_inputs['attention_mask'].dtype
lowerCAmelCase_ : Tuple = torch.cat(
[common_inputs['attention_mask'], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
lowerCAmelCase_ : List[Any] = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : List[Any] = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : Union[str, Any] = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
lowerCAmelCase_ : int = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : Any = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase_ : int = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
lowerCAmelCase_ : Tuple = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : List[Any] = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
lowerCAmelCase_ : Tuple = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return 1E-4
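# A minimal standalone sketch of the past_key_values layout the config above
# builds for ONNX export; the batch/head/length/hidden numbers are illustrative,
# not Marian's real dimensions.
if __name__ == "__main__":
    import torch

    batch, num_heads, past_len, hidden = 2, 4, 10, 64
    past_shape = (batch, num_heads, past_len, hidden // num_heads)
    # one zero-initialised (key, value) pair per decoder layer, as in the config code
    dummy_past = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(3)]
    assert dummy_past[0][0].shape == (2, 4, 10, 16)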
| 224 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
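# Distilled form of the scheduler-swap idiom the tests above repeat; the
# checkpoint id is illustrative, and from_config rebuilds a compatible
# scheduler from the pipeline's current scheduler configuration:
#
#   pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained("some/checkpoint", provider="CPUExecutionProvider")
#   pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)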
| 292 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
__magic_name__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
__magic_name__ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
__magic_name__ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__magic_name__ = model(__UpperCamelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1E-3 ) )
@slow
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
__magic_name__ = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
__magic_name__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
__magic_name__ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
__magic_name__ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__magic_name__ = model(__UpperCamelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1E-3 ) )
| 88 |
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
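# Sanity check: both generators above agree on small inputs, e.g.
#   generate_pascal_triangle(3) == generate_pascal_triangle_optimized(3)
#                               == [[1], [1, 1], [1, 2, 1]]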
| 292 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def _snake_case ( _snake_case : Any , _snake_case : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_A = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
_A = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_A = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=_snake_case , output_all_encodings=_snake_case , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , _snake_case ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_A = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
_A = os.path.join(get_home_dir() , 'models' )
_A = _load_vocab(_snake_case , _snake_case , _snake_case , cls=_snake_case )
_A = nlp.model.BERTModel(
_snake_case , len(_snake_case ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=_snake_case , use_token_type_embed=_snake_case , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=_snake_case , use_decoder=_snake_case , )
original_bort.load_parameters(_snake_case , cast_dtype=_snake_case , ignore_extra=_snake_case )
_A = original_bort._collect_params_with_prefix()
# Build our config 🤗
_A = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(_snake_case ),
}
_A = BertConfig.from_dict(_snake_case )
_A = BertForMaskedLM(_snake_case )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
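    # Every mapping below routes through check_and_map_params, so any shape
    # mismatch between the Gluon checkpoint and the HF architecture fails fast
    # instead of silently yielding a broken model.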
_A = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
_A = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
_A = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
_A = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_A = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_A = hf_bort_model.bert.encoder.layer[i]
# self attention
_A = layer.attention.self
_A = check_and_map_params(
self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
_A = check_and_map_params(
self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
_A = check_and_map_params(
self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
_A = check_and_map_params(
self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
_A = check_and_map_params(
self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
_A = check_and_map_params(
self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
_A = layer.attention.output
_A = check_and_map_params(
self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
_A = check_and_map_params(
self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
_A = check_and_map_params(
self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
_A = check_and_map_params(
self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
_A = layer.intermediate
_A = check_and_map_params(
intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
_A = check_and_map_params(
intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
_A = layer.output
_A = check_and_map_params(
bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
_A = check_and_map_params(
bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
_A = check_and_map_params(
bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
_A = check_and_map_params(
bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_A = RobertaTokenizer.from_pretrained('roberta-base' )
_A = tokenizer.encode_plus(_snake_case )['input_ids']
# Get gluon output
_A = mx.nd.array([input_ids] )
_A = original_bort(inputs=_snake_case , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_snake_case )
_A = BertModel.from_pretrained(_snake_case )
hf_bort_model.eval()
_A = tokenizer.encode_plus(_snake_case , return_tensors='pt' )
_A = hf_bort_model(**_snake_case )[0]
_A = output_gluon[0].asnumpy()
_A = output_hf[0].detach().numpy()
_A = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_A = np.allclose(_snake_case , _snake_case , atol=1E-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
        print('Absolute difference is:' , _snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 315 |
"""simple docstring"""
import math
import sys
def A__ ( UpperCamelCase ):
A = ""
try:
with open(UpperCamelCase , "rb" ) as binary_file:
A = binary_file.read()
for dat in data:
A = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = {"0": "0", "1": "1"}
A, A = "", ""
A = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A = lexicon[curr_string]
result += last_match_id
A = last_match_id + "0"
if math.loga(UpperCamelCase ).is_integer():
A = {}
for curr_key in list(UpperCamelCase ):
A = lexicon.pop(UpperCamelCase )
A = new_lex
A = last_match_id + "1"
index += 1
A = ""
return result
def A__ ( UpperCamelCase , UpperCamelCase ):
A = 8
try:
with open(UpperCamelCase , "wb" ) as opened_file:
A = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A = data_bits[counter:]
A = data_bits[counter + 1 :]
return data_bits
def A__ ( UpperCamelCase , UpperCamelCase ):
A = read_file_binary(UpperCamelCase )
A = remove_prefix(UpperCamelCase )
A = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
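# Usage sketch (file names are illustrative):
#   python this_script.py compressed.bin restored.txt
# i.e. read the compressed bit stream, strip its prefix, rebuild the LZW
# dictionary on the fly while decoding, and write the result back to disk.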
| 292 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
UpperCamelCase = b * b - 4 * a * c
UpperCamelCase = (-b + sqrt(_SCREAMING_SNAKE_CASE )) / (2 * a)
UpperCamelCase = (-b - sqrt(_SCREAMING_SNAKE_CASE )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def a__ ( ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
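# Worked check: for x**2 - 3*x + 2 the discriminant is (-3)**2 - 4*1*2 = 1,
# so quadratic_roots(1, -3, 2) returns (2.0, 1.0).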
| 153 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ):
A = name
A = val
def __str__( self :str ):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
return self.val < other.val
class _UpperCAmelCase :
def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ):
A = {}
A = {}
A = self.build_heap(__UpperCamelCase )
def __getitem__( self :int , __UpperCamelCase :Optional[int] ):
return self.get_value(__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ):
return (idx - 1) // 2
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
return idx * 2 + 1
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ):
return idx * 2 + 2
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ):
return self.heap_dict[key]
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ):
A = len(__UpperCamelCase ) - 1
A = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
A = idx
A = i.val
for i in range(__UpperCamelCase , -1 , -1 ):
self.sift_down(__UpperCamelCase , __UpperCamelCase )
return array
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ):
while True:
A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
A = self.get_right_child_idx(__UpperCamelCase )
A = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
A = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
A = r
if smallest != idx:
A, A = array[smallest], array[idx]
                A, A = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
A = smallest
else:
break
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ):
A = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
A, A = self.heap[idx], self.heap[p]
A, A = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
A = p
A = self.get_parent_idx(__UpperCamelCase )
def lowerCamelCase ( self :Any ):
return self.heap[0]
def lowerCamelCase ( self :Tuple ):
A, A = self.heap[-1], self.heap[0]
A, A = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
A = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ):
self.heap.append(__UpperCamelCase )
A = len(self.heap ) - 1
A = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase ( self :Tuple ):
return len(self.heap ) == 0
def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
A = new_value
A = new_value
self.sift_up(self.idx_of_element[node] )
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
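# After decrease_key(b, -17) above, B holds the smallest value (-17 < -1),
# so sift_up carries it past R(-1) to the root of the heap.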
| 292 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
__a = get_activation('''gelu''')
self.assertTrue(torch.allclose(gelu_python(__UpperCamelCase) , torch_builtin(__UpperCamelCase)))
self.assertFalse(torch.allclose(gelu_python(__UpperCamelCase) , gelu_new(__UpperCamelCase)))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
__a = get_activation('''gelu''')
__a = get_activation('''gelu_10''')
__a = torch_builtin(__UpperCamelCase)
__a = geluaa(__UpperCamelCase)
__a = torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(__UpperCamelCase).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
get_activation('''gelu''')
get_activation('''gelu_10''')
get_activation('''gelu_fast''')
get_activation('''gelu_new''')
get_activation('''gelu_python''')
get_activation('''gelu_pytorch_tanh''')
get_activation('''linear''')
get_activation('''mish''')
get_activation('''quick_gelu''')
get_activation('''relu''')
get_activation('''sigmoid''')
get_activation('''silu''')
get_activation('''swish''')
get_activation('''tanh''')
with self.assertRaises(__UpperCamelCase):
get_activation('''bogus''')
with self.assertRaises(__UpperCamelCase):
get_activation(__UpperCamelCase)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = get_activation('''gelu''')
__a = 1
__a = get_activation('''gelu''')
self.assertEqual(acta.a , 1)
with self.assertRaises(__UpperCamelCase):
__a = acta.a
| 49 |
"""simple docstring"""
from __future__ import annotations
_snake_case : str = []
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
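    # Queens are placed one row at a time from the top, so only the current
    # row/column and the two upward diagonals can already hold a queen;
    # everything below `row` is still empty.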
for i in range(len(UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def A__ ( UpperCamelCase , UpperCamelCase ):
if row >= len(UpperCamelCase ):
solution.append(UpperCamelCase )
printboard(UpperCamelCase )
print()
return True
for i in range(len(UpperCamelCase ) ):
if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = 1
solve(UpperCamelCase , row + 1 )
A = 0
return False
def A__ ( UpperCamelCase ):
for i in range(len(UpperCamelCase ) ):
for j in range(len(UpperCamelCase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
_snake_case : List[str] = 8
_snake_case : List[str] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 292 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_SCREAMING_SNAKE_CASE : Optional[int] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_SCREAMING_SNAKE_CASE : Optional[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = SavedModel()
snake_case = []
with open(os.path.join(UpperCamelCase_ ,'''utils''' ,'''tf_ops''' ,'''onnx.json''' ) ) as f:
snake_case = json.load(UpperCamelCase_ )['''opsets''']
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(UpperCamelCase_ )] )
with open(UpperCamelCase_ ,'''rb''' ) as f:
saved_model.ParseFromString(f.read() )
snake_case = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case = sorted(UpperCamelCase_ )
snake_case = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(UpperCamelCase_ )
if strict and len(UpperCamelCase_ ) > 0:
        raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + '''\n'''.join(incompatible_ops ) )
elif len(UpperCamelCase_ ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*UpperCamelCase_ ,sep='''\n''' )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
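# Example invocation (paths are illustrative):
#   python check_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict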
| 127 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
def A__ ( UpperCamelCase ):
    A = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 0 |
"""simple docstring"""
import math
import sys
def _UpperCAmelCase ( __lowerCamelCase : int ) -> Optional[int]:
_snake_case = ''''''
try:
with open(__lowerCamelCase , '''rb''' ) as binary_file:
_snake_case = binary_file.read()
for dat in data:
_snake_case = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[Any]:
_snake_case = {'''0''': '''0''', '''1''': '''1'''}
_snake_case , _snake_case = '''''', ''''''
_snake_case = len(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_snake_case = lexicon[curr_string]
result += last_match_id
_snake_case = last_match_id + '''0'''
if math.loga(__lowerCamelCase ).is_integer():
_snake_case = {}
for curr_key in list(__lowerCamelCase ):
_snake_case = lexicon.pop(__lowerCamelCase )
_snake_case = new_lex
_snake_case = last_match_id + '''1'''
index += 1
_snake_case = ''''''
return result
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str ) -> Tuple:
_snake_case = 8
try:
with open(__lowerCamelCase , '''wb''' ) as opened_file:
_snake_case = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__lowerCamelCase , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[str]:
_snake_case = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_snake_case = data_bits[counter:]
_snake_case = data_bits[counter + 1 :]
return data_bits
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : str ) -> Dict:
_snake_case = read_file_binary(__lowerCamelCase )
_snake_case = remove_prefix(__lowerCamelCase )
_snake_case = decompress_data(__lowerCamelCase )
write_file_binary(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 288 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCAmelCase :
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def lowerCamelCase ( self :Tuple ):
A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A = tf.concat([input_ids, eos_tensor] , axis=1 )
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCamelCase ( self :str , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] ):
A = TFPegasusModel(config=__UpperCamelCase ).get_decoder()
A = inputs_dict["input_ids"]
A = input_ids[:1, :]
A = inputs_dict["attention_mask"][:1, :]
A = inputs_dict["head_mask"]
A = 1
# first forward pass
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
A, A = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
A = tf.concat([input_ids, next_tokens] , axis=-1 )
A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A = output_from_no_past[:, -3:, random_slice_idx]
A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
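    # Any mask left as None is synthesised from the inputs: attention masks
    # zero out pad-token positions, and the head masks default to all-ones
    # (no attention heads pruned).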
if attention_mask is None:
A = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :int ):
A = TFPegasusModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def lowerCamelCase ( self :Any ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase ( self :Dict ):
A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase ( self :str , **__UpperCamelCase :str ):
A = self.translate_src_text(**__UpperCamelCase )
assert self.expected_text == generated_words
def lowerCamelCase ( self :Any , **__UpperCamelCase :List[str] ):
A = self.tokenizer(self.src_text , **__UpperCamelCase , padding=__UpperCamelCase , return_tensors="tf" )
A = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )
return generated_words
@slow
def lowerCamelCase ( self :Union[str, Any] ):
self._assert_generated_batch_equal_expected()
| 292 | 0 |
import math
__UpperCamelCase : Any = 10
__UpperCamelCase : int = 7
__UpperCamelCase : List[Any] = BALLS_PER_COLOUR * NUM_COLOURS
def A ( _lowercase = 20 ):
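    # Each of the 7 colours is absent from the default 20-ball draw with
    # probability C(60, 20) / C(70, 20), so by linearity of expectation the
    # answer is 7 * (1 - C(60, 20) / C(70, 20)).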
SCREAMING_SNAKE_CASE : List[Any] = math.comb(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Any = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = NUM_COLOURS * (1 - missing_colour / total)
return f"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A = item.ha.text
A = "https://www.amazon.in/" + item.ha.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ : Any = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = ['PerceiverFeatureExtractor']
A__ : str = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
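# With this pattern, importing the package stays cheap: the heavy torch and
# vision submodules are only loaded when an attribute such as PerceiverModel
# is first accessed through the _LazyModule proxy.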
| 207 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def lowerCamelCase ( self :Tuple ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
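# A tiny standalone illustration of the classifier-free guidance update used in
# the denoising loop above (sketch only; the tensors here are toy values):
#     guided = uncond + guidance_scale * (text - uncond)
def _cfg_demo():
    uncond = torch.zeros(1, 4, 8, 8)
    text = torch.ones(1, 4, 8, 8)
    guidance_scale = 7.5
    guided = uncond + guidance_scale * (text - uncond)
    assert torch.allclose(guided, torch.full((1, 4, 8, 8), 7.5))
    return guided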
| 292 | 0 |
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode ``sequence`` with the Atbash cipher, character by character."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Encode/decode ``sequence`` with the Atbash cipher via a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Compare the two implementations with timeit."""
    from timeit import timeit
    print('Running performance benchmarks...')
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'{example} encrypted in atbash: {atbash(example)}')
benchmark()
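    # Quick sanity checks (illustrative): Atbash maps A<->Z, b<->y, ..., and
    # applying it twice returns the original text.
    assert atbash("ABCxyz") == "ZYXcba"
    assert atbash(atbash("with space")) == "with space"
    assert atbash_slow("Hello!") == atbash("Hello!")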
| 348 |
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse the `_import_structure` and `TYPE_CHECKING` halves of a lazy init, per backend."""
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two parsed dictionaries and return a list of error messages."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F' {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F' {a} in _import_structure but not in TYPE_HINT.')
    return errors
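# A quick illustration of `analyze_results` on a hand-made mismatch (the
# dictionaries below are fabricated examples, not real inits):
_import_side = {'none': ['FooModel', 'FooConfig']}
_type_side = {'none': ['FooModel']}
assert analyze_results(_import_side, _type_side) == [
    'Differences for base imports:',
    ' FooConfig in _import_structure but not in TYPE_HINT.',
]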
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def check_submodules():
    """Check every submodule is registered in the main init of Transformers."""
    spec = importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 295 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
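# Quick sanity check of the embedding->linear tying above (illustrative sizes):
# logits from the tied head equal hidden_states @ E^T.
_demo_emb = nn.Embedding(10, 4)
_demo_lin = make_linear_from_emb(_demo_emb)
_demo_hidden = torch.randn(3, 4)
assert torch.allclose(_demo_lin(_demo_hidden), _demo_hidden @ _demo_emb.weight.T)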
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield primes indefinitely using an incremental (rolling) sieve."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which 2 * p_n * n first exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
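    # Quick check (illustrative): the rolling sieve yields the primes in order.
    _gen = sieve()
    assert [next(_gen) for _ in range(10)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]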
| 224 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below ``max_number`` via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p (p < q primes) not exceeding base^degree,
    comparing logarithms to avoid computing huge numbers."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCAmelCase : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round each dimension up to ceil(dim / scale_factor**2) * scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
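# Quick check (illustrative): with scale_factor=8, each dimension becomes
# ceil(dim / 64) * 8, e.g. 768 -> 96 exactly, 700 -> 88, 500 -> 64.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(700, 500) == (88, 64)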
class UpperCAmelCase_ ( lowercase_ ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : DDPMScheduler , UpperCamelCase__ : VQModel , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
if latents is None:
__magic_name__ = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
__magic_name__ = latents.to(__UpperCamelCase )
__magic_name__ = latents * scheduler.init_noise_sigma
return latents
def _lowercase ( self : Tuple , UpperCamelCase__ : Any=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__magic_name__ = torch.device(F'''cuda:{gpu_id}''' )
__magic_name__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def _lowercase ( self : Dict , UpperCamelCase__ : int=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__magic_name__ = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__magic_name__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
__magic_name__ , __magic_name__ = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
__magic_name__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self : str ) -> str:
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self : List[Any] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 100 , UpperCamelCase__ : float = 4.0 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ) -> str:
"""simple docstring"""
__magic_name__ = self._execution_device
__magic_name__ = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ = torch.cat(__UpperCamelCase , dim=0 )
__magic_name__ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__magic_name__ = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
__magic_name__ = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
__magic_name__ = hint.repeat_interleave(__UpperCamelCase , dim=0 )
__magic_name__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
__magic_name__ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
__magic_name__ = self.scheduler.timesteps
__magic_name__ = self.movq.config.latent_channels
__magic_name__ , __magic_name__ = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
__magic_name__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__magic_name__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ = {"""image_embeds""": image_embeds, """hint""": hint}
__magic_name__ = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 )
__magic_name__ , __magic_name__ = noise_pred.chunk(2 )
__magic_name__ , __magic_name__ = variance_pred.chunk(2 )
__magic_name__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__magic_name__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
__magic_name__ = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
__magic_name__ = image * 0.5 + 0.5
__magic_name__ = image.clamp(0 , 1 )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__magic_name__ = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
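# A small standalone illustration of the per-prompt duplication above (toy
# tensors): repeat_interleave along dim=0 copies each embedding
# num_images_per_prompt times while keeping prompt order.
_demo_emb = torch.arange(4.0).reshape(2, 2)        # two "prompt" embeddings
_demo_dup = _demo_emb.repeat_interleave(3, dim=0)  # three images per prompt
assert _demo_dup.shape == (6, 2)
assert torch.equal(_demo_dup[0], _demo_dup[2])     # copies of prompt 0 stay adjacent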
| 88 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
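# A minimal sketch of a concrete command built on the ABC above. The `hello`
# subcommand, its flag, and the factory wiring are illustrative, not part of
# the real transformers CLI; `parser` here is the subparsers action returned
# by `ArgumentParser.add_subparsers()`.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--message", type=str, default="hi")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.message))
    def __init__(self, message):
        self._message = message
    def run(self):
        print(self._message)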
| 315 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
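# A standalone sketch of the dummy past_key_values construction above
# (sizes are illustrative): each layer holds a key and a value tensor of
# shape (batch, num_heads, past_sequence_length, head_dim).
def _demo_past_key_values(batch=2, num_heads=16, past_len=10, d_model=1024, num_layers=12):
    import torch
    head_dim = d_model // num_heads
    shape = (batch, num_heads, past_len, head_dim)
    return [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
assert _demo_past_key_values()[0][0].shape == (2, 16, 10, 64)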
| 292 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase ( lowercase_ , unittest.TestCase ):
UpperCAmelCase_ = CLIPTokenizer
UpperCAmelCase_ = CLIPTokenizerFast
UpperCAmelCase_ = True
UpperCAmelCase_ = {}
UpperCAmelCase_ = False
def snake_case_ (self ) -> List[str]:
super().setUp()
# fmt: off
UpperCamelCase = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
UpperCamelCase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase = {"unk_token": "<unk>"}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCamelCase ) )
def snake_case_ (self , **__a ) -> Dict:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def snake_case_ (self , **__a ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def snake_case_ (self , __a ) -> Optional[Any]:
UpperCamelCase = "lower newer"
UpperCamelCase = "lower newer"
return input_text, output_text
def snake_case_ (self ) -> List[str]:
UpperCamelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase = "lower newer"
UpperCamelCase = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
@require_ftfy
def snake_case_ (self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
UpperCamelCase = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase )
UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase )
UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase )
UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase = tokenizer_s.tokenize(__UpperCamelCase )
UpperCamelCase = tokenizer_r.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def snake_case_ (self ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase = F"{text_of_1_token} {text_of_1_token}"
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , )
UpperCamelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCamelCase ) + 1, len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
UpperCamelCase = F" {text}"
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , )
UpperCamelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ) + 1, 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
def snake_case_ (self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(__UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def snake_case_ (self ) -> Union[str, Any]:
super().test_tokenization_python_rust_equals()
def snake_case_ (self ) -> int:
# CLIP always lower cases letters
pass
| 153 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
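# A triangle (odd cycle) is the canonical non-bipartite example, so the same
# check should fail on it:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert not check_bipartite_dfs(triangle)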
| 292 | 0 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''' , end=''' ''' )
            else:
                print('''.''' , end=''' ''' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
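# Known-count check (illustrative): the 8-queens puzzle has exactly 92
# solutions, so 92 boards were recorded above.
assert len(solution) == 92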
| 49 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth map onto the positive half-line: (x + sqrt(x**2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 292 | 0 |
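# A hedged usage sketch for the affine wrapper defined above: torch.distributions
# can express the same shift-and-scale directly, and the closed-form
# mean/variance match what the class's properties compute.
import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

base = Normal(torch.zeros(3), torch.ones(3))
loc, scale = 5.0, 2.0
distr = TransformedDistribution(base, [AffineTransform(loc=loc, scale=scale)])
print(distr.sample())            # samples centred near 5
print(base.mean * scale + loc)   # the wrapper's `mean` property
print(base.variance * scale**2)  # the wrapper's `variance` property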
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 127 |
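# BlenderbotSmall's BPE marks every non-final word piece with a trailing "@@",
# which is why the test above expects ["adapt", "act", "ap@@", "te"] for
# "adapt act apte". Undoing the split is a plain string replace:
pieces = ["adapt", "act", "ap@@", "te"]
print(" ".join(pieces).replace("@@ ", ""))  # 'adapt act apte'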
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 292 | 0 |
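# The mixin above only defines the contract; a concrete hedged example of the
# same JSON round trip, using Wav2Vec2's feature extractor as a stand-in:
import tempfile
from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000)
with tempfile.TemporaryDirectory() as tmpdirname:
    json_path = f"{tmpdirname}/feat_extract.json"
    fe.to_json_file(json_path)
    fe_reloaded = Wav2Vec2FeatureExtractor.from_json_file(json_path)
assert fe.to_dict() == fe_reloaded.to_dict()  # lossless round trip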
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 288 |
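# A stripped-down sketch of the lazy-import pattern used above (names are
# illustrative): attribute access triggers the real import, so importing the
# package itself stays cheap even when heavy backends are installed.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once
        return value

lazy = LazyModule("lazy_stdlib", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))            # math is imported only at this point
print(lazy.dumps({"ok": True}))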
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The byte-level BPE checks from the mixin do not apply to this tokenizer.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 292 | 0 |
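# The expected end-to-end behaviour exercised above, as a hedged stand-alone
# call (requires network access and the optional rjieba dependency):
from transformers import RoFormerTokenizer

tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
tokens = tok.tokenize("永和服装饰品有限公司,今天天气非常好")
print(tokens)                            # jieba-style words: ['永和', '服装', ...]
print(tok.convert_tokens_to_ids(tokens))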
def one_pence():
    return 1


def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x=200):
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 182 |
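# The nested recursion above recomputes overlapping subproblems; the standard
# dynamic-programming formulation of the same count (Project Euler 31) runs in
# O(coins * target) and gives the identical answer:
def count_coin_ways(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

print(count_coin_ways(200))  # 73682, matching solution(200) above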
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 292 | 0 |
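# Usage of the converter above on its two flag settings:
print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString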
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 207 |
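# A minimal hedged sketch of the config pattern above: subclass
# PretrainedConfig, declare a model_type, store hyperparameters, and the base
# class provides the JSON (de)serialization. The class name is illustrative.
from transformers import PretrainedConfig

class TinyRetrieverConfig(PretrainedConfig):
    model_type = "tiny-retriever"

    def __init__(self, hidden_size=768, projection_dim=128, share_encoders=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.share_encoders = share_encoders

cfg = TinyRetrieverConfig(projection_dim=64)
print(cfg.to_json_string())  # round-trips like any Hugging Face config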
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    # Round each dimension up to a multiple of scale_factor**2, then divide
    # by scale_factor to get the latent resolution.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 292 | 0 |
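# Worked values for the latent-size helper at the top of the pipeline above
# (assuming downscale_height_and_width is in scope):
print(downscale_height_and_width(768, 768))  # (96, 96): 768 / 64 = 12, times 8
print(downscale_height_and_width(770, 770))  # (104, 104): ceil(770 / 64) = 13, times 8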
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 348 |
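# The processor above is pure routing: images go to the image processor,
# audio to the feature extractor, and the results are merged into one dict.
# A structural sketch of that merge (names illustrative):
def combine_modalities(images_dict=None, audio_dict=None, images_mixed_dict=None):
    if images_dict is None and audio_dict is None:
        raise ValueError("You need to specify either an `images` or `audio` input to process.")
    output = {}
    for modality in (audio_dict, images_dict, images_mixed_dict):
        if modality is not None:
            output.update(modality)
    return output

print(combine_modalities(images_dict={"pixel_values": []}, audio_dict={"audio_values": []}))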
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
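# The sequence length the tester above relies on: one token per image patch
# plus the [CLS] token. For its defaults (image_size=30, patch_size=2):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # 226 including [CLS]
print(num_patches, seq_length)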
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic function, mapping each element into (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """GELU via the sigmoid approximation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 |
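# The second function above computes GELU through its sigmoid approximation,
# gelu(x) ~= x * sigmoid(1.702 * x). A quick hedged comparison against the
# exact erf-based definition shows how close the two are:
import math

import numpy as np

def gelu_exact(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_sigmoid(x: float) -> float:
    return x / (1.0 + np.exp(-1.702 * x))

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(f"{x:+.1f}  exact={gelu_exact(x):+.4f}  approx={gelu_sigmoid(x):+.4f}")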
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 292 | 0 |
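# Token count implied by the defaults in the config above: ViViT embeds video
# as non-overlapping 3-D tubelets of shape (2, 16, 16).
num_frames, image_size = 32, 224
t, h, w = 2, 16, 16  # tubelet_size
num_tokens = (num_frames // t) * (image_size // h) * (image_size // w)
print(num_tokens)  # 16 * 14 * 14 = 3136 patch tokens per video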
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase_ : Union[str, Any] = mock.Mock()
lowerCAmelCase_ : Dict = 5_0_0
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Dict = HTTPError
lowerCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
lowerCAmelCase_ : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase_ : List[str] = mock.Mock()
lowerCAmelCase_ : Dict = 5_0_0
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : List[Any] = HTTPError
lowerCAmelCase_ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ : int = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
lowerCAmelCase_ : List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
try:
lowerCAmelCase_ : str = tempfile.mktemp()
with open(__UpperCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , __UpperCamelCase )
lowerCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(__UpperCamelCase )
finally:
os.remove(__UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , __UpperCamelCase )
lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
lowerCAmelCase_ : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
"""simple docstring"""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Any = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase_ : Union[str, Any] = BertTokenizer(__UpperCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
lowerCAmelCase_ : Dict = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase , repo_id='test-tokenizer' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : Dict = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : List[Any] = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase_ : Dict = BertTokenizer(__UpperCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
lowerCAmelCase_ : Any = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__UpperCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self : str ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Union[str, Any] = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase_ : Union[str, Any] = CustomTokenizer(__UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
lowerCAmelCase_ : Optional[int] = BertTokenizerFast.from_pretrained(__UpperCamelCase )
bert_tokenizer.save_pretrained(__UpperCamelCase )
lowerCAmelCase_ : int = CustomTokenizerFast.from_pretrained(__UpperCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" , use_fast=__UpperCamelCase , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class TrieTest(unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Optional[int] = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : str = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Dict = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Optional[Any] = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Optional[Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[str] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : int = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
lowerCAmelCase_ : Optional[int] = Trie()
lowerCAmelCase_ : Any = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__UpperCamelCase , ['AB', 'C'] )
| 224 |
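# The Trie exercised above backs special-token splitting in the slow
# tokenizers; the same behaviour, stand-alone:
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']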
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 292 | 0 |
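# The assertion pattern repeated throughout the tests above: compare a tiny
# corner slice of the generated image against stored reference values, within
# a loose tolerance that absorbs runtime nondeterminism.
import numpy as np

image = np.random.RandomState(0).rand(1, 128, 128, 3)
image_slice = image[0, -3:, -3:, -1].flatten()  # nine floats from one corner
expected_slice = image_slice.copy()             # stands in for recorded values
assert np.abs(image_slice - expected_slice).max() < 1e-1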
class DisjointSet:
    """Union-find over weighted sets, tracking the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst; returns False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 88 |
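# Usage of the disjoint-set above: start from singleton weights and watch
# max_set track the heaviest component as merges happen.
ds = DisjointSet([1, 1, 1, 1])
ds.merge(0, 1)
ds.merge(2, 3)
ds.merge(0, 3)
print(ds.max_set)  # 4: all four singletons ended up in one set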
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
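# Cross-check for both generators above: row n of Pascal's triangle is the
# sequence of binomial coefficients C(n, k).
from math import comb

def pascal_row(n: int) -> list:
    return [comb(n, k) for k in range(n + 1)]

for n in range(5):
    print(pascal_row(n))
# [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]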
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
'''simple docstring'''
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def context_fr():
'''simple docstring'''
print('Bonjour!' )
yield
print('Au revoir!' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str ):
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] ):
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str ):
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
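# Rough sketch of what find_labels does under the hood (an illustration based on
# the library's approach, not a verbatim copy): it inspects the signature of the
# model class's forward/call method and keeps the label-like parameters.
#
#   import inspect
#
#   def find_labels_sketch(model_class):
#       signature = inspect.signature(model_class.forward)  # or .call for TF
#       if "QuestionAnswering" in model_class.__name__:
#           return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
#       return [p for p in signature.parameters if "label" in p]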
| 315 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its bytes as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the lexicon size reaches a power of two, codes grow by one bit,
        # so every existing entry is re-keyed with a leading "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file as bytes, padding the tail."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the unary-coded size prefix that the compressor prepends."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the compressed source file, decompress it and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
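# Usage sketch (file names illustrative; the input is expected to come from the
# matching Lempel-Ziv compressor, which writes the unary size prefix stripped
# by remove_prefix above):
#
#   python lempel_ziv_decompress.py compressed.lz restored.txt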
| 292 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _lowerCamelCase ( unittest.TestCase ):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
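# Minimal usage sketch for the helpers exercised above: silencing the tqdm bars
# that `from_pretrained` downloads would otherwise print.
#
#   from transformers.utils.logging import disable_progress_bar, enable_progress_bar
#
#   disable_progress_bar()   # e.g. in batch jobs where bars clutter the logs
#   ...
#   enable_progress_bar()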
| 153 |
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        # min-heapify from the last parent down to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x
    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "the new value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
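# Illustrative sketch (not part of the original module): the O(log n)
# decrease_key is exactly what a Dijkstra-style loop needs; the node names
# below are made up for the example.
#
#   pq = MinHeap([Node("src", 0), Node("v1", float("inf")), Node("v2", float("inf"))])
#   while not pq.is_empty():
#       nearest = pq.remove()           # pop the minimum in O(log n)
#       ...                             # relax edges; for an improved node u:
#       # pq.decrease_key(u, new_dist)  # update its priority in O(log n)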
| 292 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx] == "tabby, tabby cat")
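    # Note on the accelerate path above: device_map="auto" lets accelerate place
    # the checkpoint across the available devices at load time, so no explicit
    # .to(device) call is needed. A manual single-device equivalent would be
    # (sketch):
    #
    #   model = ViTHybridForImageClassification.from_pretrained(
    #       "google/vit-hybrid-base-bit-384"
    #   ).to(torch_device)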
| 49 |
"""simple docstring"""
from __future__ import annotations
solution: list[list[int]] = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
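# Independent cross-check (illustrative, not part of the original script): count
# n-queens solutions with a permutation filter; for n = 8 this also yields 92.
#
#   from itertools import permutations
#
#   def count_queens(n: int) -> int:
#       return sum(
#           1
#           for cols in permutations(range(n))
#           if len({cols[i] + i for i in range(n)}) == n
#           and len({cols[i] - i for i in range(n)}) == n
#       )
#
#   assert count_queens(8) == 92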
| 292 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
def a_ ( self ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def a_ ( self ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def a_ ( self ):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
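    # Shape rationale for the assertions above: Swinv2 computes self-attention
    # inside non-overlapping local windows, so each attention map has shape
    # (num_heads, window_size**2, window_size**2) per window rather than one
    # map over the full token sequence.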
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 127 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
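# Typical interactive usage mirrored by the slow test above (sketch):
#
#   from transformers import pipeline
#
#   depth = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"]            # PIL.Image: the rendered depth map
#   out["predicted_depth"]  # torch.Tensor: raw model output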
| 292 | 0 |
"""simple docstring"""
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
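# Launch sketch: this script assumes exactly two processes (see the assertion in
# create_accelerator), e.g. with a command along these lines (file name
# illustrative):
#
#   accelerate launch --num_processes 2 test_even_batches.py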
| 288 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
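# Quick standalone usage of the same checkpoint (a sketch mirroring the test):
#
#   from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
#
#   tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#   batch = tok(["Some long article text ..."], padding=True, return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tok.batch_decode(ids.numpy(), skip_special_tokens=True))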
| 292 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase = 8 , _lowercase = 1_024 , _lowercase="val" , _lowercase=None , _lowercase=False , _lowercase="summarization" , _lowercase=None , _lowercase=1 , _lowercase = None , _lowercase="" , **_lowercase , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = str(_lowercase )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=_lowercase )
SCREAMING_SNAKE_CASE : Tuple = Path(_lowercase )
SCREAMING_SNAKE_CASE : int = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = AutoModelForSeqaSeqLM.from_pretrained(_lowercase ).cuda()
if fpaa:
SCREAMING_SNAKE_CASE : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_lowercase , _lowercase ) # update config with task specific params
SCREAMING_SNAKE_CASE : Optional[Any] = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
SCREAMING_SNAKE_CASE : int = num_return_sequences
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(_lowercase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.model_max_length
if prefix is None:
SCREAMING_SNAKE_CASE : Tuple = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
SCREAMING_SNAKE_CASE : Any = SeqaSeqDataset(
_lowercase , _lowercase , _lowercase , max_target_length=1_024 , type_path=_lowercase , n_obs=_lowercase , prefix=_lowercase , **_lowercase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
SCREAMING_SNAKE_CASE : str = ds.make_sortish_sampler(_lowercase , distributed=_lowercase , add_extra_examples=_lowercase , shuffle=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(_lowercase , sampler=_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn )
SCREAMING_SNAKE_CASE : List[str] = []
for batch in tqdm(_lowercase ):
SCREAMING_SNAKE_CASE : Tuple = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=_lowercase , num_beams=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = batch['''ids''']
if num_return_sequences > 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = chunks(_lowercase , _lowercase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_lowercase ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(_lowercase , _lowercase )
return results, sampler.num_replicas
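# Note on the output layout of eval_data_dir above: each rank writes its own
# rank_{local_rank}_output.json holding a list of {"pred": <decoded text>,
# "id": <example id>} records; rank 0 later globs and merges these files
# (see gather_results_from_each_node further below).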
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=_lowercase , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=_lowercase , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=_lowercase , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=_lowercase , default=_lowercase )
parser.add_argument(
'''--type_path''' , type=_lowercase , default='''test''' , help='''which subset to evaluate, typically train/val/test''' )
parser.add_argument('''--task''' , type=_lowercase , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=_lowercase , default=8 , required=_lowercase , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=_lowercase , default=-1 , required=_lowercase , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=_lowercase , default=_lowercase , required=_lowercase , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=_lowercase , default=1 , required=_lowercase , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=_lowercase , default=600 , required=_lowercase , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=_lowercase , default=_lowercase , required=_lowercase )
parser.add_argument('''--tgt_lang''' , type=_lowercase , default=_lowercase , required=_lowercase )
parser.add_argument(
'''--prefix''' , type=_lowercase , required=_lowercase , default=_lowercase , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
SCREAMING_SNAKE_CASE : str = time.time()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = parser.parse_known_args()
SCREAMING_SNAKE_CASE : List[str] = parse_numeric_n_bool_cl_kwargs(_lowercase )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
SCREAMING_SNAKE_CASE : Dict = Path(args.save_dir + '''_tmp''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase ) # this handles locking.
SCREAMING_SNAKE_CASE : Dict = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
SCREAMING_SNAKE_CASE : str = {}
if args.src_lang is not None:
SCREAMING_SNAKE_CASE : Dict = args.src_lang
if args.tgt_lang is not None:
SCREAMING_SNAKE_CASE : int = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = eval_data_dir(
args.data_dir , _lowercase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_lowercase , **_lowercase , )
if args.local_rank <= 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowercase )
SCREAMING_SNAKE_CASE : int = gather_results_from_each_node(_lowercase , _lowercase , args.sync_timeout )
SCREAMING_SNAKE_CASE : Optional[int] = combine_partial_results(_lowercase )
if args.num_return_sequences > 1:
SCREAMING_SNAKE_CASE : Dict = save_dir.joinpath('''pseudolabel_results.json''' )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(_lowercase , _lowercase )
return
SCREAMING_SNAKE_CASE : int = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(_lowercase ) as f:
SCREAMING_SNAKE_CASE : Any = [x.rstrip() for x in f.readlines()][: len(_lowercase )]
# Calculate metrics, save metrics, and save _generations.txt
SCREAMING_SNAKE_CASE : Optional[int] = '''translation''' in args.task
SCREAMING_SNAKE_CASE : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
SCREAMING_SNAKE_CASE : int = '''bleu''' if calc_bleu else '''rouge'''
SCREAMING_SNAKE_CASE : Union[str, Any] = score_fn(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : int = len(_lowercase )
SCREAMING_SNAKE_CASE : Tuple = time.time() - start_time
SCREAMING_SNAKE_CASE : List[str] = round(runtime / metrics['''n_obs'''] , 4 )
SCREAMING_SNAKE_CASE : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
SCREAMING_SNAKE_CASE : Union[str, Any] = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(_lowercase , _lowercase , indent=_lowercase )
print(_lowercase )
write_txt_file(_lowercase , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(_lowercase , save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(_lowercase )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = []
for partial_result in partial_results:
records.extend(_lowercase )
SCREAMING_SNAKE_CASE : List[str] = sorted(_lowercase , key=lambda x : x["id"] )
SCREAMING_SNAKE_CASE : Tuple = [x['''pred'''] for x in records]
return preds
def A ( _lowercase , _lowercase , _lowercase ):
# WAIT FOR lots of .json files
SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
logger.info('''waiting for all nodes to finish''' )
SCREAMING_SNAKE_CASE : List[str] = None
while (time.time() - start_wait) < timeout:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(save_dir.glob('''rank_*.json''' ) )
if len(_lowercase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
SCREAMING_SNAKE_CASE : Any = lmap(_lowercase , _lowercase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
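# (The concrete command was not preserved here; an illustrative invocation
#  with placeholder data paths, using the flags defined above, might be:
#  python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#      --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro \
#      --save_dir mt_gen --task translation --bs 16)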
run_generate()
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A = item.ha.text
A = "https://www.amazon.in/" + item.ha.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
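# Worked example of the discount formula used above (illustrative prices,
# not scraped data): an MRP of ₹1,000 and a current price of ₹750 give
# (1000 - 750) / 1000 * 100 = 25.0 percent off.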
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Tuple=2, lowerCamelCase : List[Any]=3, lowerCamelCase : Any=64, lowerCamelCase : Tuple=None ):
'''simple docstring'''
lowercase__ = np.random.default_rng(__UpperCamelCase )
lowercase__ = length
lowercase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowercase__ = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa )
def __len__( self : Tuple ):
'''simple docstring'''
return self.length
def __getitem__( self : str, lowerCamelCase : Optional[int] ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class _UpperCAmelCase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : List[str]=0, lowerCamelCase : Optional[int]=0, lowerCamelCase : Optional[int]=False ):
'''simple docstring'''
super().__init__()
lowercase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowercase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowercase__ = True
def lowercase__ ( self : Dict, lowerCamelCase : Optional[int]=None ):
'''simple docstring'''
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowercase__ = False
return x * self.a[0] + self.b[0]
class _UpperCAmelCase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : List[str]=0, lowerCamelCase : List[str]=0, lowerCamelCase : str=False ):
'''simple docstring'''
super().__init__()
lowercase__ = torch.nn.Parameter(torch.tensor(__UpperCamelCase ).float() )
lowercase__ = torch.nn.Parameter(torch.tensor(__UpperCamelCase ).float() )
lowercase__ = True
def lowercase__ ( self : int, lowerCamelCase : Union[str, Any]=None ):
'''simple docstring'''
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowercase__ = False
return x * self.a + self.b
def a ( lowerCamelCase_ , lowerCamelCase_ = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
lowercase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
lowercase__ = load_dataset('''csv''' , data_files=lowerCamelCase_ )
lowercase__ = datasets['''train'''].unique('''label''' )
lowercase__ = {v: i for i, v in enumerate(lowerCamelCase_ )}
def tokenize_function(lowerCamelCase_ ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
if "label" in examples:
lowercase__ = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(lowerCamelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowercase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=2 )
lowercase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=1 )
return train_dataloader, eval_dataloader
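# Minimal usage sketch of the dataloader helper above (hypothetical names:
# the function's original identifier is obscured in this snippet; note that
# collate_fn expects an `accelerator` object in scope and the MRPC CSV
# fixtures listed above to exist on disk):
# train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
# batch = next(iter(train_dl))  # dict with input_ids / attention_mask / labels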
| 207 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Dict , __UpperCamelCase :WhisperForConditionalGeneration , __UpperCamelCase :WhisperProcessor , __UpperCamelCase :AutoencoderKL , __UpperCamelCase :CLIPTextModel , __UpperCamelCase :CLIPTokenizer , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase :StableDiffusionSafetyChecker , __UpperCamelCase :CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def lowerCamelCase ( self :Tuple ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Dict=1_60_00 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 50 , __UpperCamelCase :float = 7.5 , __UpperCamelCase :Optional[Union[str, List[str]]] = None , __UpperCamelCase :Optional[int] = 1 , __UpperCamelCase :float = 0.0 , __UpperCamelCase :Optional[torch.Generator] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , __UpperCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase :int = 1 , **__UpperCamelCase :Dict , ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
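# In symbols (matching the guidance step applied later in the denoising loop):
#     noise_pred = eps_uncond + guidance_scale * (eps_text - eps_uncond)
# so guidance_scale == 1.0 reduces to the purely text-conditional prediction,
# and larger values push the sample harder toward the text condition.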
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
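# Hypothetical usage sketch (checkpoint ids, audio loading, and the pipeline
# variable name are assumptions, not part of this file):
# output = pipe(raw_audio, sampling_rate=16_000, num_inference_steps=50)
# image = output.images[0]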
| 292 | 0 |
from sklearn.metrics import recall_score
import datasets
__snake_case = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
__snake_case = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
__snake_case = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=1 , snake_case__="binary" , snake_case__=None , snake_case__="warn" , ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict =recall_score(
__UpperCamelCase , __UpperCamelCase , labels=__UpperCamelCase , pos_label=__UpperCamelCase , average=__UpperCamelCase , sample_weight=__UpperCamelCase , zero_division=__UpperCamelCase , )
return {"recall": float(__UpperCamelCase ) if score.size == 1 else score}
| 348 |
"""simple docstring"""
_snake_case : Optional[int] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=2 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_6 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=6 , lowerCAmelCase=6 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , lowerCAmelCase=1_0_0_0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= num_channels
__lowercase= image_size
__lowercase= patch_size
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= coordinate_size
__lowercase= shape_size
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
__lowercase= range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowercase= text_seq_length
__lowercase= (image_size // patch_size) ** 2 + 1
__lowercase= self.text_seq_length + self.image_seq_length
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowercase= ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowercase= bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
tmp_coordinate = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
tmp_coordinate = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = tmp_coordinate
__lowercase= tf.constant(__UpperCamelCase )
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_input_mask:
__lowercase= random_attention_mask([self.batch_size, self.text_seq_length] )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowercase= LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= TFLayoutLMvaModel(config=__UpperCamelCase )
# text + image
__lowercase= model(__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
__lowercase= model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , training=__UpperCamelCase , )
__lowercase= model(__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowercase= model(__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowercase= model({'pixel_values': pixel_values} , training=__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= TFLayoutLMvaForSequenceClassification(config=__UpperCamelCase )
__lowercase= model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= TFLayoutLMvaForTokenClassification(config=__UpperCamelCase )
__lowercase= model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= 2
__lowercase= TFLayoutLMvaForQuestionAnswering(config=__UpperCamelCase )
__lowercase= model(
__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , training=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs
__lowercase= {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase_ : Any =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : int =(
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase_ : Dict =False
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
return True
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= copy.deepcopy(__UpperCamelCase )
if model_class in get_values(__UpperCamelCase ):
__lowercase= {
k: tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__UpperCamelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__UpperCamelCase ):
__lowercase= tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCamelCase ):
__lowercase= tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowercase= tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCamelCase ):
__lowercase= tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCamelCase ):
__lowercase= tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _A (self ):
__lowercase= TFLayoutLMvaModelTester(self )
__lowercase= ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(__UpperCamelCase )
if getattr(__UpperCamelCase , 'hf_compute_loss' , __UpperCamelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowercase= self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
__lowercase= prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCamelCase )[0]
]
__lowercase= added_label.shape.as_list()[:1]
# Test that the model correctly computes the loss with kwargs
__lowercase= self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
__lowercase= prepared_for_class.pop('input_ids' )
__lowercase= model(__UpperCamelCase , **__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that the model correctly computes the loss when we mask some positions
__lowercase= self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
__lowercase= prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowercase= prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowercase= -1_0_0
__lowercase= tf.convert_to_tensor(__UpperCamelCase )
__lowercase= model(__UpperCamelCase , **__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that the model correctly computes the loss with a dict
__lowercase= self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
__lowercase= model(__UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that the model correctly computes the loss with a tuple
__lowercase= self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase )
# Get keys that were added with the _prepare_for_class function
__lowercase= prepared_for_class.keys() - inputs_dict.keys()
__lowercase= inspect.signature(model.call ).parameters
__lowercase= list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowercase= {0: 'input_ids'}
for label_key in label_keys:
__lowercase= signature_names.index(__UpperCamelCase )
__lowercase= label_key
__lowercase= sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowercase= []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowercase= prepared_for_class[value]
__lowercase= tuple(__UpperCamelCase )
# Send to model
__lowercase= model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _A (self ):
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _A (self ):
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase= type
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _A (self ):
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _A (self ):
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _A (self ):
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@slow
def _A (self ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= TFLayoutLMvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase ) if is_vision_available() else None
@slow
def _A (self ):
__lowercase= TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=__UpperCamelCase , return_tensors='tf' ).pixel_values
__lowercase= tf.constant([[1, 2]] )
__lowercase= tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowercase= model(input_ids=__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase )
# verify the logits
__lowercase= (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , __UpperCamelCase )
__lowercase= tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
| 295 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( UpperCamelCase ):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase ):
A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
A = s_dict.pop(UpperCamelCase )
def A__ ( UpperCamelCase ):
A, A = emb.weight.shape
A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A = emb.weight.data
return lin_layer
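# (The helper above realizes weight tying: a bias-free nn.Linear whose weight
#  matrix is the decoder token-embedding table, used as the LM head when
#  share_decoder_input_output_embed is set in the fairseq args.)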
def A__ ( UpperCamelCase , UpperCamelCase ):
A = torch.load(UpperCamelCase , map_location="cpu" )
A = mam_aaa["args"]
A = mam_aaa["model"]
A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
A = state_dict["decoder.embed_tokens.weight"].shape[0]
A = args.share_decoder_input_output_embed
A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
A = SpeechaTextForConditionalGeneration(UpperCamelCase )
A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
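# Illustrative invocation (the script file name and paths are placeholders):
# python convert_speech_to_text_checkpoint.py --fairseq_path /path/to/s2t.pt \
#     --pytorch_dump_folder_path ./s2t_hf_model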
| 292 | 0 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = [False] * len(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = [-1] * len(lowerCAmelCase__ )
def dfs(lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = c
for u in graph[v]:
if not visited[u]:
dfs(lowerCAmelCase__ , 1 - c )
for i in range(len(lowerCAmelCase__ ) ):
if not visited[i]:
dfs(lowerCAmelCase__ , 0 )
for i in range(len(lowerCAmelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowercase__ : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
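# An extra illustrative check (not part of the original file): an odd cycle
# such as a triangle is not 2-colorable, so the checker reports False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle)) # False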
| 224 |
"""simple docstring"""
from math import isqrt, loga
def A__ ( UpperCamelCase ):
A = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
A = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ):
A = degree * loga(UpperCamelCase )
A = int(UpperCamelCase )
A = calculate_prime_numbers(UpperCamelCase )
A = 0
A = 0
A = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
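# Worked check of the log-space condition above (a sketch; the file's `loga`
# plays the role of math.log2): for primes p=2, q=3 with base 10 and degree 2,
# p**q * q**p <= 10**2 becomes 3*log2(2) + 2*log2(3) ~= 6.17 <= 2*log2(10) ~= 6.64,
# and indeed 2**3 * 3**2 = 72 <= 100, so (2, 3) counts as one hybrid pair.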
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 0 |
def a__ ( A_, A_ ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def a__ ( ):
'''simple docstring'''
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 88 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : Union[str, Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
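# Example of what the lazy structure above enables once wired into
# transformers (class names taken from _import_structure; torch must be
# installed for the model classes to resolve):
# from transformers.models.encodec import EncodecConfig, EncodecFeatureExtractor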
| 292 | 0 |
"""simple docstring"""
import numpy as np
import datasets
a = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ):
# convert to numpy arrays
_A = np.array(__UpperCamelCase )
_A = np.array(__UpperCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
_A = X - np.mean(__UpperCamelCase )
_A = np.cov(reference_distribution.T )
try:
_A = np.linalg.inv(__UpperCamelCase )
except np.linalg.LinAlgError:
_A = np.linalg.pinv(__UpperCamelCase )
_A = np.dot(__UpperCamelCase , __UpperCamelCase )
_A = np.dot(__UpperCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 315 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
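# Sketch of how a config like the one above is consumed by transformers' ONNX export
# helper (hedged: the config class name and output path are placeholders, since the
# class name is obfuscated in this snippet):
# from pathlib import Path
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# from transformers.onnx import export
# tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")  # name assumed
# export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))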
| 292 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _lowerCamelCase ( unittest.TestCase , ToolTesterMixin ):
    def setUp(self ) -> Any:
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()
    def test_exact_match_arg(self ) -> List[Any]:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
    def test_exact_match_kwarg(self ) -> Union[str, Any]:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 153 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph ):
    visited = [False] * len(graph )
    color = [-1] * len(graph )

    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
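# A triangle contains an odd cycle, so it is not bipartite (extra illustrative check):
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False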
| 292 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __snake_case ( _UpperCAmelCase ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module and adds a trainable low-rank adapter on top of it."""

    def __init__( self , module : nn.Module , rank : int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False) , nn.Linear(rank , module.out_features , bias=False) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std)
        nn.init.zeros_(self.adapter[1].weight)  # zero init: training starts from the base model's behaviour
        self.adapter.to(module.weight.device)
    def forward( self , input , *args , **kwargs):
        return self.module(input , *args , **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _A ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = '''bigscience/bloom-1b7'''
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = '''Hello my name is'''
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    MAX_NEW_TOKENS = 10
    def _lowerCamelCase ( self : Any):
        '''simple docstring'''
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class _A ( lowercase_ ):
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''')
__a = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_abit.config
self.assertTrue(hasattr(__UpperCamelCase , '''quantization_config'''))
__a = config.to_dict()
__a = config.to_diff_dict()
__a = config.to_json_string()
def _lowerCamelCase ( self : int):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a = self.model_fpaa.get_memory_footprint()
__a = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
__a = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def _lowerCamelCase ( self : int):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCamelCase , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.tokenizer(self.input_text , return_tensors='''pt''')
__a = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCamelCase) , self.EXPECTED_OUTPUTS)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = BitsAndBytesConfig()
__a = True
__a = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCamelCase , device_map='''auto''')
__a = self.tokenizer(self.input_text , return_tensors='''pt''')
__a = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCamelCase) , self.EXPECTED_OUTPUTS)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
with self.assertRaises(__UpperCamelCase), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__UpperCamelCase)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = BitsAndBytesConfig()
with self.assertRaises(__UpperCamelCase):
__a = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCamelCase , load_in_abit=__UpperCamelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
with self.assertRaises(__UpperCamelCase):
# Tries with `str`
self.model_abit.to('''cpu''')
with self.assertRaises(__UpperCamelCase):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(__UpperCamelCase):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0'''))
with self.assertRaises(__UpperCamelCase):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__UpperCamelCase):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__a = self.tokenizer(self.input_text , return_tensors='''pt''')
__a = self.model_fpaa.to(torch.floataa)
__a = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0) , max_new_tokens=10)
# Check this does not throw an error
__a = self.model_fpaa.to('''cpu''')
# Check this does not throw an error
__a = self.model_fpaa.half()
# Check this does not throw an error
__a = self.model_fpaa.float()
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCamelCase , device_map='''auto''')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
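# For reference, the canonical 8-bit counterpart of the 4-bit loading exercised in
# the tests above (a sketch; requires a CUDA GPU with `bitsandbytes` installed):
# model_8bit = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-1b7", load_in_8bit=True, device_map="auto")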
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _A ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : Optional[Any]):
'''simple docstring'''
__a = '''t5-small'''
__a = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__a = AutoTokenizer.from_pretrained(cls.model_name)
__a = '''Translate in German: Hello, my dog is cute'''
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : str):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a = TaForConditionalGeneration._keep_in_fpaa_modules
__a = None
# test with `t5-small`
__a = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
__a = self.tokenizer(self.input_text , return_tensors='''pt''').to(0)
__a = model.generate(**__UpperCamelCase)
# test with `flan-t5-small`
__a = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
__a = self.tokenizer(self.input_text , return_tensors='''pt''').to(0)
__a = model.generate(**__UpperCamelCase)
__a = modules
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
__a = self.tokenizer(self.input_text , return_tensors='''pt''').to(0)
__a = model.generate(**__UpperCamelCase)
# test with `flan-t5-small`
__a = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
__a = self.tokenizer(self.input_text , return_tensors='''pt''').to(0)
__a = model.generate(**__UpperCamelCase)
class _A ( lowercase_ ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().setUp()
# model_name
__a = '''bigscience/bloom-560m'''
__a = '''t5-small'''
# Different types of model
__a = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
# Sequence classification model
__a = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
# CausalLM model
__a = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
# Seq2seq model
__a = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__UpperCamelCase , device_map='''auto''')
def _lowerCamelCase ( self : Any):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _A ( lowercase_ ):
def _lowerCamelCase ( self : str):
'''simple docstring'''
super().setUp()
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__a = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _A ( lowercase_ ):
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().setUp()
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__UpperCamelCase , device_map='''balanced''')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
__a = self.tokenizer(self.input_text , return_tensors='''pt''')
# Second real batch
__a = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCamelCase) , self.EXPECTED_OUTPUTS)
class _A ( lowercase_ ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''facebook/opt-350m'''
super().setUp()
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
if version.parse(importlib.metadata.version('''bitsandbytes''')) < version.parse('''0.37.0'''):
return
# Step 1: freeze all parameters
__a = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
__a = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__a = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__UpperCamelCase)):
__a = LoRALayer(module.q_proj , rank=16)
__a = LoRALayer(module.k_proj , rank=16)
__a = LoRALayer(module.v_proj , rank=16)
# Step 3: dummy batch
__a = self.tokenizer('''Test batch ''' , return_tensors='''pt''').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__a = model.forward(**__UpperCamelCase)
out.logits.norm().backward()
for module in model.modules():
if isinstance(__UpperCamelCase , __UpperCamelCase):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(__UpperCamelCase , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class _A ( lowercase_ ):
UpperCamelCase__ : str = '''gpt2-xl'''
UpperCamelCase__ : List[Any] = 3.3191854854152187
| 49 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
    def __init__( self , base_distribution : Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection ( nn.Module ):
    def __init__( self , in_features : int , args_dim : Dict[str, int] , domain_map : Callable[..., Tuple[torch.Tensor]] , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x : torch.Tensor ):
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer ( nn.Module ):
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput :
    distribution_class : type
    in_features : int
    args_dim : Dict[str, int]
    def __init__( self , dim : int = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None , ):
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ):
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ):
        return len(self.event_shape )
    @property
    def value_in_support( self ):
        return 0.0
    def get_parameter_projection( self , in_features : int ):
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args : torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x : torch.Tensor ):
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput ( DistributionOutput ):
    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT
    @classmethod
    def domain_map( cls , df : torch.Tensor , loc : torch.Tensor , scale : torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput ( DistributionOutput ):
    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal
    @classmethod
    def domain_map( cls , loc : torch.Tensor , scale : torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput ( DistributionOutput ):
    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count : torch.Tensor , logits : torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None ):
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
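# Minimal usage sketch of the classes above (shapes illustrative): project a feature
# vector to StudentT parameters, then wrap the result in an affine rescaling.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    df, loc, scale = projection(torch.randn(8, 32))
    distr = output.distribution((df, loc, scale), loc=torch.zeros(8), scale=torch.ones(8))
    print(distr.sample().shape)  # torch.Size([8])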
| 292 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
_SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
_SCREAMING_SNAKE_CASE : Any = {
'AI-Sweden/gpt-sw3-126m': 20_48,
'AI-Sweden/gpt-sw3-350m': 20_48,
'AI-Sweden/gpt-sw3-1.6b': 20_48,
'AI-Sweden/gpt-sw3-6.7b': 20_48,
'AI-Sweden/gpt-sw3-20b': 20_48,
}
class A__ ( lowercase_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['input_ids', 'attention_mask']
def __init__( self , __snake_case , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = None , **__snake_case , ):
snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case = kwargs.get('''name_or_path''' )
if name_or_path is None:
            logger.warning(
                '''name_or_path not provided; this will work for all GPTSw3 models except gpt-sw3-7b.'''
                ''' If you are testing the model, this can safely be ignored.''' )
snake_case = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
snake_case = '''<|endoftext|>''' if eos_token is None else eos_token
snake_case = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
snake_case = unk_token if pad_token is None else pad_token
snake_case = eos_token if bos_token is None else bos_token
else:
snake_case = '''<pad>''' if pad_token is None else pad_token
snake_case = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case = do_lower_case
snake_case = remove_space
snake_case = keep_accents
snake_case = vocab_file
snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
# Used for whitespace normalization in input texts
        # fmt: off
snake_case = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
snake_case = re.compile(
F'''[{"".join(map(__UpperCamelCase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]''' )
def __getstate__( self ):
snake_case = self.__dict__.copy()
snake_case = None
return state
def __setstate__( self , __snake_case ):
snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case = {}
snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def a_ ( self ):
return len(self.sp_model )
def a_ ( self , __snake_case ):
snake_case = self.non_printing_characters_re.sub('''''' , __UpperCamelCase )
# Normalize whitespaces
snake_case = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
snake_case = unicodedata.normalize('''NFC''' , __UpperCamelCase )
return text
def a_ ( self , __snake_case , **__snake_case ):
snake_case = self.preprocess_text(__UpperCamelCase )
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def a_ ( self , __snake_case ):
return self.sp_model.PieceToId(__UpperCamelCase )
def a_ ( self , __snake_case ):
return self.sp_model.IdToPiece(__UpperCamelCase )
@staticmethod
def a_ ( __snake_case ):
return out_string
def a_ ( self , __snake_case ):
snake_case = []
snake_case = ''''''
snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCamelCase ) + token
snake_case = True
snake_case = []
else:
current_sub_tokens.append(__UpperCamelCase )
snake_case = False
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string
def a_ ( self ):
snake_case = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self , __snake_case , __snake_case = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , '''wb''' ) as fi:
snake_case = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def a_ ( self , __snake_case , __snake_case = False ):
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case = self.preprocess_text(__UpperCamelCase )
snake_case = self.sp_model.encode(__UpperCamelCase )
else:
snake_case = [self.preprocess_text(__UpperCamelCase ) for t in text]
snake_case = self.sp_model.encode(__UpperCamelCase )
if return_tensors is True or return_tensors == "pt":
snake_case = torch.tensor(__UpperCamelCase )
return token_ids
def a_ ( self , __snake_case ):
return self.sp_model.decode(__UpperCamelCase )
def a_ ( self , __snake_case ):
snake_case = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
snake_case = (
F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(__UpperCamelCase ) + F'''{self.bos_token}Bot:'''
)
return self.encode(text=__UpperCamelCase )
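# Hypothetical quick use of the tokenizer above (in `transformers` this class is
# GPTSw3Tokenizer; here its name is obfuscated as `A__`; needs network access):
# tok = A__.from_pretrained("AI-Sweden/gpt-sw3-126m")
# print(tok("Träd är fina", return_tensors="pt")["input_ids"])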
| 127 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
UpperCamelCase = None
def lowerCamelCase ( self :List[Any] ):
A = self.feature_extraction_class(**self.feat_extract_dict )
A = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
A = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Dict ):
A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
A = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase ( self :Tuple ):
A = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
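# Sketch of how this mixin is typically consumed (names hypothetical): a concrete
# test class supplies the feature extractor class plus its constructor kwargs.
# class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}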
| 292 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase_ ):
__a = (UnCLIPScheduler,)
    def get_scheduler_config( self : Tuple , **kwargs ):
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs )
        return config
def lowercase ( self : List[str] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowercase ( self : str ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def lowercase ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def lowercase ( self : Optional[Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__UpperCamelCase )
def lowercase ( self : List[Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowercase ( self : List[Any] ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__UpperCamelCase , prev_timestep=__UpperCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(variance_type='''fixed_small_log''' )
_snake_case = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5
def lowercase ( self : int ):
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(variance_type='''learned_range''' )
_snake_case = scheduler_class(**__UpperCamelCase )
_snake_case = 0.5
assert scheduler._get_variance(1 , predicted_variance=__UpperCamelCase ) - -1_0.1_7_1_2_7_9_0 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__UpperCamelCase ) - -5.7_9_9_8_0_5_2 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__UpperCamelCase ) - -0.0_0_1_0_0_1_1 < 1e-5
def lowercase ( self : List[Any] ):
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**__UpperCamelCase )
_snake_case = scheduler.timesteps
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter
_snake_case = torch.manual_seed(0 )
for i, t in enumerate(__UpperCamelCase ):
# 1. predict noise residual
_snake_case = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_snake_case = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
_snake_case = pred_prev_sample
_snake_case = torch.sum(torch.abs(__UpperCamelCase ) )
_snake_case = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
def lowercase ( self : int ):
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(25 )
_snake_case = scheduler.timesteps
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter
_snake_case = torch.manual_seed(0 )
for i, t in enumerate(__UpperCamelCase ):
# 1. predict noise residual
_snake_case = model(__UpperCamelCase , __UpperCamelCase )
if i + 1 == timesteps.shape[0]:
_snake_case = None
else:
_snake_case = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_snake_case = scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , prev_timestep=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample
_snake_case = pred_prev_sample
_snake_case = torch.sum(torch.abs(__UpperCamelCase ) )
_snake_case = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def lowercase ( self : Any ):
pass
def lowercase ( self : Dict ):
pass
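# Standalone sketch of the denoising loop these tests exercise (shapes assumed;
# a random tensor stands in for the UNet's noise prediction):
# scheduler = UnCLIPScheduler(num_train_timesteps=1000)
# sample = torch.randn(1, 3, 32, 32)
# generator = torch.manual_seed(0)
# for t in scheduler.timesteps:
#     residual = torch.randn_like(sample)
#     sample = scheduler.step(residual, t, sample, generator=generator).prev_sample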
| 288 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def lowerCamelCase ( self :List[str] ):
super().setUp()
def lowerCamelCase ( self :int , **__UpperCamelCase :List[Any] ):
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Tuple , **__UpperCamelCase :Optional[int] ):
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = "永和服装饰品有限公司,今天天气非常好"
A = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCamelCase ( self :int ):
A = self.get_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :str ):
A = self.get_rust_tokenizer()
A, A = self.get_chinese_input_output_texts()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
A = tokens + [tokenizer.unk_token]
A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def lowerCamelCase ( self :Any ):
pass
def lowerCamelCase ( self :Tuple ):
pass
def lowerCamelCase ( self :List[str] ):
pass
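# Illustrative round-trip with the pretrained vocab (network access assumed):
# tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
# print(tok.tokenize("今天天气非常好"))  # rjieba-style word segmentation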
| 292 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class lowercase__ :
    """A graph vertex with an id, Prim key/parent fields and weighted edges."""

    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id : distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def connect( graph , a , b , edge ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim( graph , root ):
    """List-based Prim: repeatedly extract the minimum-key vertex from a plain list."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap( graph , root ):
    """Heap-based Prim: the same algorithm, with heapq keeping the frontier ordered."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector( ):
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
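# Worked example (illustrative): a weighted triangle; MST edges as 1-based pairs.
# vertices = [lowercase__(str(i)) for i in range(3)]
# connect(vertices, 1, 2, 1)
# connect(vertices, 2, 3, 2)
# connect(vertices, 1, 3, 3)
# print(prim(vertices, vertices[0]))  # [(2, 1), (3, 2)]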
| 182 |
"""simple docstring"""
def snake_to_camel_case( input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        msg = F"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 292 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
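# With the lazy-module pattern above, `from <package> import PLBartModel` only loads
# `modeling_plbart` (and hence torch) on first attribute access, keeping the package
# import itself cheap.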
| 207 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case : List[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
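# e.g. downscale_height_and_width(768, 768) == (96, 96): pixel sizes are mapped to
# latent sizes, ceil(size / scale_factor**2) * scale_factor, for the movq latents.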
class _UpperCAmelCase ( lowercase_ ):
def __init__( self :Any , __UpperCamelCase :UNetaDConditionModel , __UpperCamelCase :DDPMScheduler , __UpperCamelCase :VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
if latents is None:
A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
A = latents.to(__UpperCamelCase )
A = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
A = torch.device(f"cuda:{gpu_id}" )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict , __UpperCamelCase :int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
A = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A, A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self :str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self :List[Any] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCamelCase :torch.FloatTensor , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 5_12 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :float = 4.0 , __UpperCamelCase :int = 1 , __UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase :Optional[torch.FloatTensor] = None , __UpperCamelCase :Optional[str] = "pil" , __UpperCamelCase :bool = True , ):
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = torch.cat(__UpperCamelCase , dim=0 )
A = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
A = hint.repeat_interleave(__UpperCamelCase , dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
A = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
A = self.scheduler.timesteps
A = self.movq.config.latent_channels
A, A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
# create initial latent
A = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds, "hint": hint}
A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
A, A = noise_pred.split(latents.shape[1] , dim=1 )
A, A = noise_pred.chunk(2 )
A, A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 , 1 )
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 292 | 0 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__snake_case = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def lowerCAmelCase_ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =_ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase : str =get_sagemaker_input()
else:
UpperCAmelCase : Optional[int] =get_cluster_input()
return config
def lowerCAmelCase_ ( __lowerCAmelCase=None )-> Union[str, Any]:
'''simple docstring'''
if subparsers is not None:
UpperCAmelCase : int =subparsers.add_parser('''config''' , description=__lowerCAmelCase )
else:
UpperCAmelCase : Any =argparse.ArgumentParser('''Accelerate config command''' , description=__lowerCAmelCase )
parser.add_argument(
'''--config_file''' , default=__lowerCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=__lowerCAmelCase )
return parser
def lowerCAmelCase_ ( __lowerCAmelCase )-> Tuple:
'''simple docstring'''
UpperCAmelCase : int =get_user_input()
if args.config_file is not None:
UpperCAmelCase : str =args.config_file
else:
if not os.path.isdir(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
UpperCAmelCase : Optional[Any] =default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(__lowerCAmelCase )
else:
config.to_yaml_file(__lowerCAmelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def lowerCAmelCase_ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Any =config_command_parser()
UpperCAmelCase : Any =parser.parse_args()
config_command(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 348 |
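The parser builder above works both standalone and as a subcommand of a larger CLI. A minimal sketch of that dual-use argparse pattern, with a hypothetical command name and handler:
import argparse

def command_parser(subparsers=None):
    # reuse the same builder as a standalone parser or as a subcommand,
    # mirroring the pattern in config_command_parser above
    if subparsers is not None:
        parser = subparsers.add_parser("config", description="Configure the tool")
    else:
        parser = argparse.ArgumentParser("config", description="Configure the tool")
    parser.add_argument("--config_file", default=None, help="Path to the config file")
    if subparsers is not None:
        parser.set_defaults(func=lambda args: print(args.config_file))
    return parser

parser = command_parser()
args = parser.parse_args(["--config_file", "default_config.yaml"])
print(args.config_file)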
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
def __init__( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str]=13 , __UpperCamelCase :Any=30 , __UpperCamelCase :int=2 , __UpperCamelCase :Union[str, Any]=3 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :List[str]=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Dict=4 , __UpperCamelCase :List[str]=37 , __UpperCamelCase :str="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :int=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = num_patches + 1
def lowerCamelCase ( self :Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self :Union[str, Any] ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Any , __UpperCamelCase :Any ):
A = ViTMSNModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = self.type_sequence_label_size
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = ViTMSNForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self :Optional[Any] ):
A = self.prepare_config_and_inputs()
A, A, A = config_and_inputs
A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
A = ViTMSNModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def lowerCamelCase ( self :Union[str, Any] ):
pass
def lowerCamelCase ( self :int ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCamelCase ( self :Tuple ):
A, A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase ( self :List[Any] ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ViTMSNModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def A__ ( ):
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self :Union[str, Any] ):
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def lowerCamelCase ( self :Any ):
torch.manual_seed(2 )
A = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
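The tester above derives the sequence length as the patch count plus one [CLS] token. A quick standalone check of that arithmetic; the helper name vit_seq_length is hypothetical:
def vit_seq_length(image_size: int, patch_size: int) -> int:
    # number of non-overlapping patches, plus one [CLS] token
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1

# with the tester defaults above (image_size=30, patch_size=2)
print(vit_seq_length(30, 2))    # 226
print(vit_seq_length(224, 16))  # 197, the usual ViT-Base setting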
from queue import PriorityQueue
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Tuple:
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowercase= cst_fwd.get(lowercase__ , np.inf )
__lowercase= cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowercase= new_cost_f
__lowercase= v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowercase= cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
__lowercase= -1
__lowercase= set()
__lowercase= set()
__lowercase= {source: 0}
__lowercase= {destination: 0}
__lowercase= {source: None}
__lowercase= {destination: None}
__lowercase= PriorityQueue()
__lowercase= PriorityQueue()
__lowercase= np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__lowercase, __lowercase= queue_forward.get()
visited_forward.add(lowercase__ )
__lowercase, __lowercase= queue_backward.get()
visited_backward.add(lowercase__ )
__lowercase= pass_and_relaxation(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
__lowercase= pass_and_relaxation(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__lowercase= shortest_distance
return shortest_path_distance
lowerCAmelCase = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
lowerCAmelCase = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 |
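For comparison, a minimal one-directional Dijkstra over the same adjacency-list format (node -> list of [neighbor, cost]); the bidirectional search above typically explores a smaller frontier, but both should report the same shortest distance. The function name dijkstra and the E-to-F query are illustrative assumptions:
import heapq

def dijkstra(graph: dict, source: str, destination: str) -> float:
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        if d > dist.get(v, float("inf")):
            continue  # stale heap entry
        for nxt, w in graph[v]:
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                heapq.heappush(heap, (d + w, nxt))
    return -1

graph_fwd = {"B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]]}
print(dijkstra(graph_fwd, "E", "F"))  # 3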
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''vivit'''
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=2_24 , __UpperCamelCase :int=32 , __UpperCamelCase :Union[str, Any]=[2, 16, 16] , __UpperCamelCase :Optional[Any]=3 , __UpperCamelCase :Optional[Any]=7_68 , __UpperCamelCase :Any=12 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :List[str]=30_72 , __UpperCamelCase :Any="gelu_fast" , __UpperCamelCase :List[Any]=0.0 , __UpperCamelCase :str=0.0 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :Optional[Any]=1e-06 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = num_frames
A = tubelet_size
A = num_channels
A = qkv_bias
super().__init__(**__UpperCamelCase )
| 292 | 0 |
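The tubelet_size of [2, 16, 16] above factors a video into spatio-temporal patches, one token per tubelet. A sketch of the resulting token count, derived from the config fields alone (an assumption; the [CLS] token and the actual embedding code are left out):
def vivit_num_tubelets(num_frames: int, image_size: int, tubelet_size: list) -> int:
    # tubelet_size is (time, height, width); one token per tubelet
    t, h, w = tubelet_size
    return (num_frames // t) * (image_size // h) * (image_size // w)

# with the defaults above: 32 frames, 224x224 images, [2, 16, 16] tubelets
print(vivit_num_tubelets(32, 224, [2, 16, 16]))  # 16 * 14 * 14 = 3136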
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowercase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : WhisperForConditionalGeneration , SCREAMING_SNAKE_CASE_ : WhisperProcessor , SCREAMING_SNAKE_CASE_ : AutoencoderKL , SCREAMING_SNAKE_CASE_ : CLIPTextModel , SCREAMING_SNAKE_CASE_ : CLIPTokenizer , SCREAMING_SNAKE_CASE_ : UNetaDConditionModel , SCREAMING_SNAKE_CASE_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE_ : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE_ : CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=__UpperCamelCase , speech_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
lowerCAmelCase_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict=1_6_0_0_0 , SCREAMING_SNAKE_CASE_ : int = 5_1_2 , SCREAMING_SNAKE_CASE_ : int = 5_1_2 , SCREAMING_SNAKE_CASE_ : int = 5_0 , SCREAMING_SNAKE_CASE_ : float = 7.5 , SCREAMING_SNAKE_CASE_ : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = 1 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase_ : Union[str, Any] = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors='pt' , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
lowerCAmelCase_ : Optional[int] = self.speech_model.generate(__UpperCamelCase , max_length=4_8_0_0_0_0 )
lowerCAmelCase_ : Dict = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : str = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : Optional[int] = len(__UpperCamelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__UpperCamelCase )}." )
# get prompt text embeddings
lowerCAmelCase_ : List[Any] = self.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCAmelCase_ : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase_ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase_ : Any = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase_ : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Optional[Any] = text_embeddings.shape
lowerCAmelCase_ : Dict = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
lowerCAmelCase_ : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase_ : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase_ : Optional[Any] = 4_2
if negative_prompt is None:
lowerCAmelCase_ : int = [''] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
F" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : Union[str, Any] = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
lowerCAmelCase_ : str = negative_prompt
lowerCAmelCase_ : Tuple = text_input_ids.shape[-1]
lowerCAmelCase_ : Optional[int] = self.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' , )
lowerCAmelCase_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase_ : int = uncond_embeddings.shape[1]
lowerCAmelCase_ : Optional[Any] = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
lowerCAmelCase_ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase_ : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase_ : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase_ : int = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device='cpu' , dtype=__UpperCamelCase ).to(
self.device )
else:
lowerCAmelCase_ : List[str] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase_ : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase_ : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase_ : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase_ : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase_ : List[Any] = {}
if accepts_eta:
lowerCAmelCase_ : List[str] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
lowerCAmelCase_ : str = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase_ ,lowerCAmelCase_ : int = noise_pred.chunk(2 )
lowerCAmelCase_ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : int = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : Tuple = 1 / 0.1_82_15 * latents
lowerCAmelCase_ : Dict = self.vae.decode(__UpperCamelCase ).sample
lowerCAmelCase_ : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ : Optional[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 224 |
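The repeat/view sequence above duplicates each prompt's embedding num_images_per_prompt times in an mps-friendly way. A standalone check of that reshaping; the helper name duplicate_embeddings is hypothetical:
import torch

def duplicate_embeddings(emb: torch.Tensor, num_images_per_prompt: int) -> torch.Tensor:
    # (batch, seq, dim) -> (batch * num_images_per_prompt, seq, dim),
    # keeping copies of the same prompt adjacent, as in the pipeline above
    bs, seq_len, dim = emb.shape
    emb = emb.repeat(1, num_images_per_prompt, 1)
    return emb.view(bs * num_images_per_prompt, seq_len, dim)

emb = torch.arange(12.0).view(2, 2, 3)  # 2 prompts, seq 2, dim 3
out = duplicate_embeddings(emb, num_images_per_prompt=2)
print(out.shape)                    # torch.Size([4, 2, 3])
print(torch.equal(out[0], out[1]))  # True: copies of prompt 0 are adjacent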
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 292 | 0 |
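Each test above compares a 3x3 corner slice of the output image against hard-coded values with a max-abs tolerance. The pattern in isolation, with a hypothetical helper and synthetic data:
import numpy as np

def assert_close_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-1) -> None:
    # compare the bottom-right 3x3 patch of the last channel, as the tests do
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - expected_slice).max() < atol

rng = np.random.RandomState(0)
image = rng.rand(1, 128, 128, 3).astype(np.float32)
expected = image[0, -3:, -3:, -1].flatten() + 1e-3  # within tolerance
assert_close_slice(image, expected)
print("slice check passed")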
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCAmelCase : List[Any] = threading.Lock()
__lowerCAmelCase : Optional[logging.Handler] = None
__lowerCAmelCase : Optional[Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
__lowerCAmelCase : List[Any] = logging.WARNING
__lowerCAmelCase : Optional[int] = True
def a__ ( ):
'''simple docstring'''
__magic_name__ = os.getenv("""TRANSFORMERS_VERBOSITY""", A_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def a__ ( ):
'''simple docstring'''
return __name__.split(""".""" )[0]
def a__ ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def a__ ( ):
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
__magic_name__ = logging.StreamHandler() # Set sys.stderr as stream.
__magic_name__ = sys.stderr.flush
# Apply our default configuration to the library root logger.
__magic_name__ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
__magic_name__ = False
def a__ ( ):
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
__magic_name__ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
__magic_name__ = None
def a__ ( ):
'''simple docstring'''
return log_levels
def a__ ( A_ = None ):
'''simple docstring'''
if name is None:
__magic_name__ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(A_ )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( A_ ):
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(A_ )
def a__ ( ):
'''simple docstring'''
return set_verbosity(A_ )
def a__ ( ):
'''simple docstring'''
return set_verbosity(A_ )
def a__ ( ):
'''simple docstring'''
return set_verbosity(A_ )
def a__ ( ):
'''simple docstring'''
return set_verbosity(A_ )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( A_ ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(A_ )
def a__ ( A_ ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(A_ )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
__magic_name__ = False
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
__magic_name__ = True
def a__ ( ):
'''simple docstring'''
__magic_name__ = _get_library_root_logger().handlers
for handler in handlers:
__magic_name__ = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(A_ )
def a__ ( ):
'''simple docstring'''
__magic_name__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(A_ )
def a__ ( self, *A_, **A_ ):
'''simple docstring'''
__magic_name__ = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""", A_ )
if no_advisory_warnings:
return
self.warning(*A_, **A_ )
__lowerCAmelCase : int = warning_advice
@functools.lru_cache(A_ )
def a__ ( self, *A_, **A_ ):
'''simple docstring'''
self.warning(*A_, **A_ )
__lowerCAmelCase : Optional[int] = warning_once
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any] ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
__magic_name__ = args[0] if args else None
def __iter__( self : str ) -> Tuple:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : Optional[int] , UpperCamelCase__ : List[str] ) -> str:
"""simple docstring"""
def empty_fn(*UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self
def __exit__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
return
class UpperCAmelCase_ :
'''simple docstring'''
def __call__( self : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict ) -> Tuple:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*__UpperCamelCase , **__UpperCamelCase )
else:
return EmptyTqdm(*__UpperCamelCase , **__UpperCamelCase )
def _lowercase ( self : Any , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ) -> List[str]:
"""simple docstring"""
__magic_name__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__UpperCamelCase , **__UpperCamelCase )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCAmelCase : Any = _tqdm_cls()
def a__ ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
'''simple docstring'''
global _tqdm_active
__magic_name__ = True
hf_hub_utils.enable_progress_bars()
def a__ ( ):
'''simple docstring'''
global _tqdm_active
__magic_name__ = False
hf_hub_utils.disable_progress_bars()
| 88 |
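The _configure_library_root_logger above is a lock-guarded, idempotent installation of a single handler on the library's root logger. A minimal sketch of that singleton pattern, with hypothetical names and a simplified default level:
import logging
import sys
import threading

_lock = threading.Lock()
_default_handler = None

def configure_root_logger(name: str = "mylib") -> logging.Logger:
    # install exactly one StreamHandler on the library root logger,
    # no matter how many threads call this, mirroring the module above
    global _default_handler
    with _lock:
        if _default_handler is None:
            _default_handler = logging.StreamHandler(sys.stderr)
            root = logging.getLogger(name)
            root.addHandler(_default_handler)
            root.setLevel(logging.WARNING)
    return logging.getLogger(name)

logger = configure_root_logger()
logger.warning("configured once")  # later calls reuse the same handler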
"""simple docstring"""
def A__ ( UpperCamelCase ):
A = generate_pascal_triangle(UpperCamelCase )
for row_idx in range(UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(UpperCamelCase ):
A = populate_current_row(UpperCamelCase , UpperCamelCase )
triangle.append(UpperCamelCase )
return triangle
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A, A = 1, 1
for current_col_idx in range(1 , UpperCamelCase ):
calculate_current_element(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return current_row
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def A__ ( UpperCamelCase ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , UpperCamelCase ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(UpperCamelCase , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(UpperCamelCase )
return result
def A__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
A = F"{func.__name__}({value})"
A = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 292 | 0 |
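Each row above is built additively from its predecessor; the same values fall out of binomial coefficients directly, which makes a handy cross-check. A sketch assuming Python 3.8+ for math.comb; the helper name pascal_row is hypothetical:
from math import comb

def pascal_row(row_idx: int) -> list:
    # row n of Pascal's triangle is C(n, 0) .. C(n, n)
    return [comb(row_idx, k) for k in range(row_idx + 1)]

triangle = [pascal_row(n) for n in range(5)]
print(triangle)  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]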
"""simple docstring"""
def _snake_case ( _snake_case : Dict = 10 , _snake_case : List[Any] = 10_00 , _snake_case : Dict = True ) -> Any:
'''simple docstring'''
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
return min_val if option else max_val
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ) -> Tuple:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Tuple ) -> Dict:
'''simple docstring'''
assert (
isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('argument value for lower and higher must be(lower > higher)' )
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value' )
def answer(_snake_case : List[str] ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
_A = lower
_A = higher
_A = []
while True:
_A = get_avg(_snake_case , _snake_case )
last_numbers.append(_snake_case )
if answer(_snake_case ) == "low":
_A = number
elif answer(_snake_case ) == "high":
_A = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def _snake_case ( ) -> str:
'''simple docstring'''
_A = int(input('Enter lower value : ' ).strip() )
_A = int(input('Enter high value : ' ).strip() )
_A = int(input('Enter value to guess : ' ).strip() )
guess_the_number(_snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
main()
| 315 |
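The loop above halves the [lower, higher] interval on every pass, so it converges in O(log(higher - lower)) guesses. A non-interactive sketch of that convergence, assuming lower < to_guess < higher as the original validates; the helper name count_guesses is hypothetical:
def count_guesses(lower: int, higher: int, to_guess: int) -> int:
    guesses = 0
    while True:
        guesses += 1
        number = (lower + higher) // 2
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return guesses

print(count_guesses(0, 1000, 17))  # 9 here, on the order of log2(1000)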
"""simple docstring"""
import math
import sys
def A__ ( UpperCamelCase ):
A = ""
try:
with open(UpperCamelCase , "rb" ) as binary_file:
A = binary_file.read()
for dat in data:
A = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = {"0": "0", "1": "1"}
A, A = "", ""
A = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A = lexicon[curr_string]
result += last_match_id
A = last_match_id + "0"
if math.loga(UpperCamelCase ).is_integer():
A = {}
for curr_key in list(UpperCamelCase ):
A = lexicon.pop(UpperCamelCase )
A = new_lex
A = last_match_id + "1"
index += 1
A = ""
return result
def A__ ( UpperCamelCase , UpperCamelCase ):
A = 8
try:
with open(UpperCamelCase , "wb" ) as opened_file:
A = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def A__ ( UpperCamelCase ):
A = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A = data_bits[counter:]
A = data_bits[counter + 1 :]
return data_bits
def A__ ( UpperCamelCase , UpperCamelCase ):
A = read_file_binary(UpperCamelCase )
A = remove_prefix(UpperCamelCase )
A = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 292 | 0 |
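read_file_binary above turns each byte into an 8-character bit string, and write_file_binary packs bit strings back into bytes. The round trip in memory, with hypothetical helper names:
def bytes_to_bits(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)

def bits_to_bytes(bits: str) -> bytes:
    # assumes len(bits) is a multiple of 8, as the writer above guarantees
    return bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))

payload = b"LZW"
bits = bytes_to_bits(payload)
print(bits)                            # 010011000101101001010111
print(bits_to_bytes(bits) == payload)  # True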