| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
def lowerCamelCase (self , labels ) -> Optional[int]:
'''simple docstring'''
if isinstance(labels , str ):
labels = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__(self , sequences , labels , hypothesis_template ) -> Optional[int]:
'''simple docstring'''
if len(labels ) == 0 or len(sequences ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(hypothesis_template ) )
if isinstance(sequences , str ):
sequences = [sequences]
sequence_pairs = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_a )
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=ZeroShotClassificationArgumentHandler() , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = args_parser
super().__init__(*__magic_name__ , **__magic_name__ )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
for label, ind in self.model.config.label2id.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def lowerCamelCase (self , __magic_name__ , __magic_name__=True , __magic_name__=True , __magic_name__=TruncationStrategy.ONLY_FIRST , **__magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
snake_case_ : List[str] = self.tokenizer.eos_token
try:
snake_case_ : List[str] = self.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , )
except Exception as e:
if "too short" in str(__magic_name__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
snake_case_ : Union[str, Any] = self.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCamelCase (self , **__magic_name__ ) -> int:
'''simple docstring'''
if kwargs.get('''multi_class''' , __magic_name__ ) is not None:
snake_case_ : Optional[int] = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
snake_case_ : Optional[int] = {}
if "candidate_labels" in kwargs:
snake_case_ : Any = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
snake_case_ : str = kwargs['''hypothesis_template''']
snake_case_ : Optional[int] = {}
if "multi_label" in kwargs:
snake_case_ : Optional[int] = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__(self , __magic_name__ , *__magic_name__ , **__magic_name__ , ) -> Dict:
'''simple docstring'''
if len(__magic_name__ ) == 0:
pass
elif len(__magic_name__ ) == 1 and "candidate_labels" not in kwargs:
snake_case_ : Any = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__=None , __magic_name__="This example is {}." ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : List[str] = self._args_parser(__magic_name__ , __magic_name__ , __magic_name__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(__magic_name__ , __magic_name__ ) ):
snake_case_ : List[str] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__magic_name__ ) - 1,
**model_input,
}
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : int = inputs['''candidate_label''']
snake_case_ : Dict = inputs['''sequence''']
snake_case_ : Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
snake_case_ : int = self.model(**__magic_name__ )
snake_case_ : List[str] = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def lowerCamelCase (self , __magic_name__ , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : str = [outputs['''candidate_label'''] for outputs in model_outputs]
snake_case_ : Dict = [outputs['''sequence'''] for outputs in model_outputs]
snake_case_ : str = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
snake_case_ : Union[str, Any] = logits.shape[0]
snake_case_ : Optional[int] = len(__magic_name__ )
snake_case_ : Optional[Any] = N // n
snake_case_ : List[str] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__magic_name__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
snake_case_ : Optional[Any] = self.entailment_id
snake_case_ : Optional[int] = -1 if entailment_id == 0 else 0
snake_case_ : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
snake_case_ : Tuple = np.exp(__magic_name__ ) / np.exp(__magic_name__ ).sum(-1 , keepdims=True )
snake_case_ : Optional[int] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
snake_case_ : Dict = reshaped_outputs[..., self.entailment_id]
snake_case_ : Union[str, Any] = np.exp(__magic_name__ ) / np.exp(__magic_name__ ).sum(-1 , keepdims=True )
snake_case_ : Dict = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
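A minimal usage sketch of the zero-shot pipeline implemented above, via the public transformers API (the checkpoint name is illustrative; any NLI model whose config maps an "entailment" label works):
from transformers import pipeline
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "I am planning a trip to Paris next month.",
    candidate_labels=["travel", "cooking", "finance"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # labels come sorted by descending score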
| 60 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVision2Seq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__a , **__a )
def _lowercase (self : Union[str, Any] , __a : "Image" ):
return self.pre_processor(images=__a , return_tensors="pt" )
def _lowercase (self : List[str] , __a : Dict ):
return self.model.generate(**__a )
def _lowercase (self : int , __a : Optional[Any] ):
return self.pre_processor.batch_decode(__a , skip_special_tokens=True )[0].strip()
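A hedged usage sketch of the captioning tool above; it assumes the transformers agents helper load_tool resolves the "image_captioner" name declared on the class (treat the exact loading call as an assumption):
from PIL import Image
from transformers import load_tool
captioner = load_tool("image_captioner")  # assumption: resolved from the tool name attribute above
print(captioner(Image.open("photo.jpg")))  # e.g. "a dog sitting on a bench"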
| 78 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _A ( lowerCAmelCase_ : List[str] ):
"""simple docstring"""
if not is_accelerate_available():
return method
lowerCAmelCase__ = version.parse(accelerate.__version__ ).base_version
if version.parse(lowerCAmelCase__ ) < version.parse("0.17.0" ):
return method
def wrapper(self : List[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : List[Any] ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *lowerCAmelCase_ , **lowerCAmelCase_ )
return wrapper
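# The function above is a decorator: on accelerate >= 0.17.0 it fires the
# module's _hf_hook.pre_forward hook (moving offloaded weights onto the device)
# before the wrapped method runs; upstream this is diffusers' apply_forward_hook.
# Illustrative application, names assumed:
# class TinyAutoencoder(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         return self.encoder(x)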
| 61 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCAmelCase_ = []
if isinstance(snake_case_ , dict ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCAmelCase_ = []
for d in reversed(snake_case_ ):
idx.append(flat_idx % d )
UpperCAmelCase_ = flat_idx // d
return tuple(reversed(snake_case_ ) )
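# Worked check of the flat-to-multi index conversion above: for dims (2, 3),
# flat index 4 decomposes as 4 = 1 * 3 + 1, so the function returns (1, 1).
# (Upstream this helper is _flat_idx_to_idx, the name used further below.)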
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(snake_case_ : List[bool] ) -> None:
UpperCAmelCase_ = True
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_ , snake_case_ ):
if s == e:
path_list.append(slice(snake_case_ , s + 1 ) )
else:
break
UpperCAmelCase_ = tuple(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = start[divergence_idx]
return tuple(
path + (slice(snake_case_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = end[divergence_idx]
return tuple(
path + (slice(snake_case_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase_ = t.shape[:no_batch_dims]
UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ = _get_minimal_slice_set(
snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
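# Worked sketch of the chunk slicing above: for a tensor with batch shape
# (2, 3) (flat size 6), flat_start=1 and flat_end=5 map to multi-indices
# (0, 1) and (1, 1); the minimal slice set is [t[0:1, 1:3], t[1:2, 0:2]],
# whose pieces are re-flattened and concatenated into one chunk of 4 rows.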
def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
if not (len(snake_case_ ) > 0):
raise ValueError("Must provide at least one input" )
UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ )
UpperCAmelCase_ = None
if _out is not None:
UpperCAmelCase_ = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ = 0
UpperCAmelCase_ = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ = _select_chunk
else:
UpperCAmelCase_ = partial(
_chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , )
UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ )
# Run the layer on the chunk
UpperCAmelCase_ = layer(**snake_case_ )
# Allocate space for the output
if out is None:
UpperCAmelCase_ = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_ , dict ):
def assign(snake_case_ : dict , snake_case_ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case_ , dict ):
assign(snake_case_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCAmelCase_ = da[k]
assign(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , tuple ):
for xa, xa in zip(snake_case_ , snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCAmelCase_ = xa
elif isinstance(snake_case_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
UpperCAmelCase_ = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ )
return out
class __A :
def __init__(self : Dict , __a : int = 512 , ):
UpperCAmelCase_ = max_chunk_size
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a : int ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ = i
UpperCAmelCase_ = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase (self : int , __a : Iterable , __a : Iterable ):
UpperCAmelCase_ = True
for aa, aa in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , dict ):
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == aa
return consistent
def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase_ = False
if not consistent:
UpperCAmelCase_ = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
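# The tuner above probes power-of-two chunk sizes up to max_chunk_size and
# binary-searches for the largest size whose trial forward pass completes
# without a RuntimeError (typically an OOM); the result is cached until the
# argument shapes or values change. Illustrative use, upstream (OpenFold) names:
# tuner = ChunkSizeTuner(max_chunk_size=512)
# chunk_size = tuner.tune_chunk_size(representative_fn, args, min_chunk_size=1)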
| 78 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=sys.maxsize ):
SCREAMING_SNAKE_CASE : str = "bilinear"
SCREAMING_SNAKE_CASE : str = max_size
SCREAMING_SNAKE_CASE : int = short_edge_length
def __call__( self : Dict , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = []
for img in imgs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
SCREAMING_SNAKE_CASE : Any = size * 1.0 / min(UpperCAmelCase_ , UpperCAmelCase_ )
if h < w:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = scale * h, size
if max(UpperCAmelCase_ , UpperCAmelCase_ ) > self.max_size:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.max_size * 1.0 / max(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = newh * scale
SCREAMING_SNAKE_CASE : List[str] = neww * scale
SCREAMING_SNAKE_CASE : List[str] = int(neww + 0.5 )
SCREAMING_SNAKE_CASE : str = int(newh + 0.5 )
if img.dtype == np.uint8:
SCREAMING_SNAKE_CASE : Optional[Any] = Image.fromarray(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : int = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
SCREAMING_SNAKE_CASE : Optional[int] = nn.functional.interpolate(
UpperCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCAmelCase_ ).squeeze(0 )
img_augs.append(UpperCAmelCase_ )
return img_augs
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
SCREAMING_SNAKE_CASE : Optional[Any] = cfg.INPUT.FORMAT
SCREAMING_SNAKE_CASE : List[str] = cfg.SIZE_DIVISIBILITY
SCREAMING_SNAKE_CASE : List[str] = cfg.PAD_VALUE
SCREAMING_SNAKE_CASE : List[str] = cfg.INPUT.MAX_SIZE_TEST
SCREAMING_SNAKE_CASE : int = cfg.MODEL.DEVICE
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
SCREAMING_SNAKE_CASE : List[Any] = lambda x : (x - self.pixel_mean) / self.pixel_std
def _A ( self : List[str] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = tuple(max(UpperCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
SCREAMING_SNAKE_CASE : Dict = [im.shape[-2:] for im in images]
SCREAMING_SNAKE_CASE : str = [
nn.functional.pad(
UpperCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
return torch.stack(UpperCAmelCase_ ), torch.tensor(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=False ):
with torch.no_grad():
if not isinstance(UpperCAmelCase_ , list ):
SCREAMING_SNAKE_CASE : Optional[int] = [images]
if single_image:
assert len(UpperCAmelCase_ ) == 1
for i in range(len(UpperCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCAmelCase_ , images.pop(UpperCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] )
SCREAMING_SNAKE_CASE : Any = self.aug(UpperCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.normalizer(UpperCAmelCase_ ) for x in images]
# now pad them to do the following operations
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.pad(UpperCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.true_divide(UpperCAmelCase_ , UpperCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert torch.isfinite(lowercase ).all(), "Box tensor contains infinite or NaN!"
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = box_size
tensor[:, 0].clamp_(min=0 , max=lowercase )
tensor[:, 1].clamp_(min=0 , max=lowercase )
tensor[:, 2].clamp_(min=0 , max=lowercase )
tensor[:, 3].clamp_(min=0 , max=lowercase )
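# Worked example of the two helpers above, assuming boxes are laid out as
# (x0, y0, x1, y1) and scale_yx holds per-image (scale_y, scale_x):
# boxes = torch.tensor([[10., 20., 30., 40.]]); scale_yx = torch.tensor([[0.5, 2.0]])
# x coordinates are doubled and y coordinates halved: [[20., 10., 60., 20.]]
# The clamp helper then snaps all four coordinates into the image bounds.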
| 62 | '''simple docstring'''
import copy
import re
class __A :
a__ : Optional[int] = """hp"""
a__ : Optional[Any] = {}
a__ : List[Any] = None
@classmethod
def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ):
UpperCAmelCase_ = prefix
UpperCAmelCase_ = defaults
cls.build_naming_info()
@staticmethod
def _lowercase (__a : List[Any] , __a : List[str] ):
if len(__a ) == 0:
return ""
UpperCAmelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
UpperCAmelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a : Union[str, Any] ):
UpperCAmelCase_ = ""
while integer != 0:
UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ = 0
while True:
UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = sword
break
UpperCAmelCase_ = short_word
UpperCAmelCase_ = word
return short_word
@staticmethod
def _lowercase (__a : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = param_name.split("_" )
UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCAmelCase_ = ["", "_"]
for separator in separators:
UpperCAmelCase_ = separator.join(__a )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ = shortname
UpperCAmelCase_ = param_name
return shortname
return param_name
@staticmethod
def _lowercase (__a : int , __a : Union[str, Any] ):
UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a )
UpperCAmelCase_ = short_name
UpperCAmelCase_ = param_name
@classmethod
def _lowercase (cls : Any ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
UpperCAmelCase_ = info
@classmethod
def _lowercase (cls : int , __a : Optional[int] ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(v , bool ):
UpperCAmelCase_ = 1 if v else 0
UpperCAmelCase_ = "" if isinstance(v , (int, float) ) else "-"
UpperCAmelCase_ = f"""{key}{sep}{v}"""
name.append(__a )
return "_".join(__a )
@classmethod
def _lowercase (cls : Dict , __a : Dict ):
UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = repr.split("_" )
UpperCAmelCase_ = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
else:
UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a )
UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) )
UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCAmelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ = cls.DEFAULTS[k]
return parameters
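# Sketch of the naming scheme above (illustrative values; upstream this class
# is TrialShortNamer): with PREFIX "hp" and defaults {"learning_rate": 2e-5},
# each word is shortened to its shortest unused prefix ("learning_rate" -> "lr"),
# so the name for {"learning_rate": 0.01} is "hp_lr0.01"; params equal to their
# defaults are omitted and bools encode as 1/0.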
| 78 | 0 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
a : Union[List[PIL.Image.Image], np.ndarray]
a : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""pixel_values"""]
def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ):
super().__init__(**__a )
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = pad_size
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
UpperCAmelCase_ = (old_height // size + 1) * size - old_height
UpperCAmelCase_ = (old_width // size + 1) * size - old_width
return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a )
def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_pad:
UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
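# Note: the pad step above grows each spatial dimension to the next multiple of
# pad_size using symmetric reflection; e.g. a 17x20 image with pad_size=8 is
# padded by (7, 4) on the bottom/right to 24x24.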
| 78 | 0 |
def A__ ( snake_case_ : str , snake_case_ : str ):
SCREAMING_SNAKE_CASE__: Optional[Any]= len(snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[int]= []
for i in range(len(snake_case_ ) - pat_len + 1 ):
SCREAMING_SNAKE_CASE__: List[str]= True
for j in range(snake_case_ ):
if s[i + j] != pattern[j]:
SCREAMING_SNAKE_CASE__: List[Any]= False
break
if match_found:
position.append(snake_case_ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
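# The scan above is O(n * m) in the worst case (n = len(s), m = len(pattern)):
# each of the n - m + 1 alignments may compare up to m characters before a
# mismatch, e.g. s = 'A' * 1_000 with pattern = 'AAB' fails only on the last
# character at every alignment. The call above prints [4, 10, 18].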
| 64 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
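# Example of the two-pass renaming above (direction: HF Diffusers -> original
# SD, per the map tuples): "down_blocks.0.resnets.0.norm1.weight" first takes
# the resnet substitution ("norm1" -> "in_layers.0"), then the layer prefix
# substitution ("down_blocks.0.resnets.0." -> "input_blocks.1.0."), ending as
# "input_blocks.1.0.in_layers.0.weight".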
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0."
SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}."
SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2}
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
UpperCAmelCase_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
UpperCAmelCase_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
return new_state_dict
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_v20_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_v20(text_enc_dict)
SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 78 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__UpperCAmelCase = namedtuple('covid_data', 'cases deaths recovered')
def lowerCAmelCase ( __UpperCamelCase = "https://www.worldometers.info/coronavirus/" ):
'''simple docstring'''
UpperCAmelCase__ : int = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(__UpperCamelCase ).content ).xpath(__UpperCamelCase ) )
__UpperCAmelCase = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 65 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase_ ( snake_case_ : ndarray ) -> float:
'''simple docstring'''
return np.dot(snake_case_ , snake_case_ )
class __A :
def __init__(self : int , *,
__a : float = np.inf , __a : str = "linear" , __a : float = 0.0 , ):
UpperCAmelCase_ = regularization
UpperCAmelCase_ = gamma
if kernel == "linear":
UpperCAmelCase_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
UpperCAmelCase_ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase_ = f"""Unknown kernel: {kernel}"""
raise ValueError(__a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.dot(__a , __a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def _lowercase (self : str , __a : list[ndarray] , __a : ndarray ):
UpperCAmelCase_ = observations
UpperCAmelCase_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase_) , ) = np.shape(__a )
def to_minimize(__a : ndarray ) -> float:
UpperCAmelCase_ = 0
((UpperCAmelCase_) , ) = np.shape(__a )
for i in range(__a ):
for j in range(__a ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__a )
UpperCAmelCase_ = LinearConstraint(__a , 0 , 0 )
UpperCAmelCase_ = Bounds(0 , self.regularization )
UpperCAmelCase_ = minimize(
__a , np.ones(__a ) , bounds=__a , constraints=[ly_contraint] ).x
UpperCAmelCase_ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase_ = 0
for i in range(__a ):
for j in range(__a ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCAmelCase_ = s / n
def _lowercase (self : Optional[int] , __a : ndarray ):
UpperCAmelCase_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __a )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
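# Toy sketch of the SVM above on a linearly separable set; the fit/predict
# method names follow the TheAlgorithms original this resembles and are an
# assumption here, since identifiers in this copy are obfuscated:
# xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([2.0, 1.0]), np.asarray([2.0, 2.0])]
# ys = np.asarray([1, 1, -1, -1])
# svm = SVC(kernel="linear"); svm.fit(xs, ys)
# svm.predict(np.asarray([0.0, 1.5]))  # -> 1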
| 78 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase = logging.get_logger(__name__)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Optional[Any] = R'\w+[.]\d+'
_lowercase : str = re.findall(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for pat in pats:
_lowercase : int = key.replace(SCREAMING_SNAKE_CASE , '_'.join(pat.split('.' ) ) )
return key
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Dict = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
_lowercase : str = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
_lowercase : Optional[Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
_lowercase : str = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
_lowercase : List[str] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
_lowercase : Any = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_lowercase : List[str] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
_lowercase : Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_lowercase : Any = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_lowercase : List[Any] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=42 ) -> Optional[int]:
# Step 1: Convert pytorch tensor to numpy
_lowercase : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
_lowercase : int = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE ) )
_lowercase : str = flatten_dict(SCREAMING_SNAKE_CASE )
_lowercase : Dict = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowercase : Optional[Any] = rename_key(SCREAMING_SNAKE_CASE )
_lowercase : Dict = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
_lowercase , _lowercase : Optional[Any] = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
_lowercase : str = jnp.asarray(SCREAMING_SNAKE_CASE )
return unflatten_dict(SCREAMING_SNAKE_CASE )
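# Example of the key handling above: a PyTorch conv weight
# "encoder.layers.0.conv.weight" of shape (out, in, H, W) is first renamed to
# "encoder.layers_0.conv.weight" (digit segments joined with "_"), and the conv
# branch then yields the Flax param path (..., "conv", "kernel") with the tensor
# transposed to (H, W, in, out). (The key path itself is illustrative.)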
| 66 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """perceiver"""
def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ):
super().__init__(**__a )
UpperCAmelCase_ = num_latents
UpperCAmelCase_ = d_latents
UpperCAmelCase_ = d_model
UpperCAmelCase_ = num_blocks
UpperCAmelCase_ = num_self_attends_per_block
UpperCAmelCase_ = num_self_attention_heads
UpperCAmelCase_ = num_cross_attention_heads
UpperCAmelCase_ = qk_channels
UpperCAmelCase_ = v_channels
UpperCAmelCase_ = cross_attention_shape_for_attention
UpperCAmelCase_ = self_attention_widening_factor
UpperCAmelCase_ = cross_attention_widening_factor
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_query_residual
# masked language modeling attributes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
# image classification attributes
UpperCAmelCase_ = image_size
# flow attributes
UpperCAmelCase_ = train_size
# multimodal autoencoding attributes
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = audio_samples_per_frame
UpperCAmelCase_ = samples_per_patch
UpperCAmelCase_ = output_shape
class __A ( UpperCamelCase__ ):
@property
def _lowercase (self : Dict ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def _lowercase (self : Optional[Any] ):
return 1E-4
def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__a , PreTrainedTokenizerBase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("input_ids" )
return inputs
elif isinstance(__a , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 78 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
snake_case = logging.get_logger(__name__)
snake_case = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''whisper'''
SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int ,__A : Dict=5_1865 ,__A : List[str]=80 ,__A : str=6 ,__A : List[str]=4 ,__A : int=6 ,__A : Optional[Any]=4 ,__A : int=1536 ,__A : List[str]=1536 ,__A : Optional[Any]=0.0 ,__A : Union[str, Any]=0.0 ,__A : int=5_0257 ,__A : Dict=True ,__A : int=True ,__A : int="gelu" ,__A : Union[str, Any]=256 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : Optional[int]=0.0 ,__A : List[Any]=0.02 ,__A : Optional[int]=False ,__A : int=1500 ,__A : Union[str, Any]=448 ,__A : Optional[Any]=5_0256 ,__A : List[str]=5_0256 ,__A : Dict=5_0256 ,__A : int=None ,__A : Optional[Any]=[220, 5_0256] ,__A : Optional[Any]=False ,__A : Dict=256 ,__A : Tuple=False ,__A : Union[str, Any]=0.05 ,__A : Optional[Any]=10 ,__A : int=2 ,__A : Optional[int]=0.0 ,__A : Optional[int]=10 ,__A : Any=0 ,__A : Optional[int]=7 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = num_mel_bins
_lowercase = d_model
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = encoder_ffn_dim
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase = max_source_positions
_lowercase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_lowercase = classifier_proj_size
_lowercase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase = apply_spec_augment
_lowercase = mask_time_prob
_lowercase = mask_time_length
_lowercase = mask_time_min_masks
_lowercase = mask_feature_prob
_lowercase = mask_feature_length
_lowercase = mask_feature_min_masks
_lowercase = median_filter_width
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,suppress_tokens=__A ,begin_suppress_tokens=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
_lowercase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
return common_inputs
def __UpperCAmelCase ( self : Tuple ,__A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional["TensorType"] = None ,__A : int = 2_2050 ,__A : float = 5.0 ,__A : int = 220 ,) -> Mapping[str, Any]:
_lowercase = OrderedDict()
_lowercase = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=__A ,framework=__A ,sampling_rate=__A ,time_duration=__A ,frequency=__A ,)
_lowercase = encoder_inputs['input_features'].shape[2]
_lowercase = encoder_sequence_length // 2 if self.use_past else seq_length
_lowercase = super().generate_dummy_inputs(
preprocessor.tokenizer ,__A ,__A ,__A ,__A )
_lowercase = encoder_inputs.pop('input_features' )
_lowercase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
_lowercase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> float:
return 1e-3
| 67 | '''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post `message_body` to the Slack incoming-webhook URL `slack_url`."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
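# A hedged convenience variant (not part of the original script): the same POST with
# an explicit timeout so a hung webhook endpoint cannot block the caller; it relies
# only on documented `requests` behavior.
def send_slack_message_with_timeout(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=timeout)
    response.raise_for_status()  # raises requests.HTTPError on any non-2xx status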
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 78 | 0 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of `img` with its contrast adjusted by `level` (-255..255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
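# Worked example of the factor formula: for level = 170,
#     factor = (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so a pixel value of 150 maps to int(128 + 4.85 * (150 - 128)) ≈ 234, with the
# result stored back into the image's 8-bit band by PIL's point().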
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 68 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase (self : int ):
self.enable_attention_slicing(__a )
def _lowercase (self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase (self : Optional[int] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ):
UpperCAmelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCAmelCase_ = self.segmentation_model(**__a )
UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
| 78 | 0 |
'''simple docstring'''
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)
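# Usage sketch (classic word-break cases, verified by hand):
#     >>> word_break("applepenapple", ["apple", "pen"])
#     True
#     >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
#     False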
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 69 | '''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (0 is treated as a power of two)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
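# The bit trick: a power of two has a single set bit, so n & (n - 1) clears it and
# the result is 0 exactly for powers of two (and for 0 itself, since 0 & -1 == 0):
#     >>> is_power_of_two(16)
#     True
#     >>> is_power_of_two(12)
#     False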
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 0 |
from ...configuration_utils import PretrainedConfig
lowerCamelCase : List[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''tapas'''
def __init__( self : Dict , A_ : int=30522 , A_ : int=768 , A_ : Any=12 , A_ : Tuple=12 , A_ : Optional[Any]=3072 , A_ : Optional[Any]="gelu" , A_ : Union[str, Any]=0.1 , A_ : Dict=0.1 , A_ : Optional[Any]=1024 , A_ : Tuple=[3, 256, 256, 2, 256, 256, 10] , A_ : Optional[Any]=0.02 , A_ : str=1E-12 , A_ : str=0 , A_ : Tuple=10.0 , A_ : int=0 , A_ : int=1.0 , A_ : Any=None , A_ : List[str]=1.0 , A_ : Optional[Any]=False , A_ : List[Any]=None , A_ : Optional[int]=1.0 , A_ : Union[str, Any]=1.0 , A_ : List[str]=False , A_ : Optional[int]=False , A_ : Any="ratio" , A_ : List[str]=None , A_ : Dict=None , A_ : int=64 , A_ : Union[str, Any]=32 , A_ : Dict=False , A_ : List[str]=True , A_ : int=False , A_ : Tuple=False , A_ : Dict=True , A_ : Tuple=False , A_ : Optional[Any]=None , A_ : Union[str, Any]=None , **A_ : Optional[int] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=A_ , **A_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_sizes
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
# Fine-tuning task hyperparameters
lowerCamelCase_ = positive_label_weight
lowerCamelCase_ = num_aggregation_labels
lowerCamelCase_ = aggregation_loss_weight
lowerCamelCase_ = use_answer_as_supervision
lowerCamelCase_ = answer_loss_importance
lowerCamelCase_ = use_normalized_answer_loss
lowerCamelCase_ = huber_loss_delta
lowerCamelCase_ = temperature
lowerCamelCase_ = aggregation_temperature
lowerCamelCase_ = use_gumbel_for_cells
lowerCamelCase_ = use_gumbel_for_aggregation
lowerCamelCase_ = average_approximation_function
lowerCamelCase_ = cell_selection_preference
lowerCamelCase_ = answer_loss_cutoff
lowerCamelCase_ = max_num_rows
lowerCamelCase_ = max_num_columns
lowerCamelCase_ = average_logits_per_cell
lowerCamelCase_ = select_one_column
lowerCamelCase_ = allow_empty_column_selection
lowerCamelCase_ = init_cell_selection_weights_to_zero
lowerCamelCase_ = reset_position_index_per_cell
lowerCamelCase_ = disable_per_token_loss
# Aggregation hyperparameters
lowerCamelCase_ = aggregation_labels
lowerCamelCase_ = no_aggregation_label_index
if isinstance(self.aggregation_labels , A_ ):
lowerCamelCase_ = {int(k ): v for k, v in aggregation_labels.items()}
| 70 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to leave exactly one coin on every node."""
    if root is None:
        return 0

    # Validation: the number of nodes must equal the total number of coins
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
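# Usage sketch: a root holding 3 coins with two empty children needs two moves,
# one coin passed down each edge:
#     >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
#     2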
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 0 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that are neither "integration" nor "unit" as unit tests
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into the test's temporary directory
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't count test runs in the Hub download statistics
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence SQLAlchemy 2.0 deprecation warnings during the test
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 71 | '''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: int =logging.getLogger()
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" )
if os.path.exists(snake_case_ ):
with open(snake_case_ , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
@classmethod
def _lowercase (cls : Any ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase (cls : int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
| 78 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'nat'
UpperCamelCase__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , snake_case_=4 , snake_case_=3 , snake_case_=64 , snake_case_=[3, 4, 6, 5] , snake_case_=[2, 4, 8, 16] , snake_case_=7 , snake_case_=3.0 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=0.0 , snake_case_=None , snake_case_=None , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =patch_size
lowercase =num_channels
lowercase =embed_dim
lowercase =depths
lowercase =len(snake_case_ )
lowercase =num_heads
lowercase =kernel_size
lowercase =mlp_ratio
lowercase =qkv_bias
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =drop_path_rate
lowercase =hidden_act
lowercase =layer_norm_eps
lowercase =initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase =int(embed_dim * 2 ** (len(snake_case_ ) - 1) )
lowercase =layer_scale_init_value
lowercase =['''stem'''] + [f'stage{idx}' for idx in range(1 , len(snake_case_ ) + 1 )]
lowercase , lowercase =get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
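# Worked arithmetic for the derived width above, with the defaults
# (embed_dim=64, depths=[3, 4, 6, 5], i.e. four stages):
#     hidden_size = int(64 * 2 ** (4 - 1)) = 512
# i.e. the channel dimension doubles at each downsampling stage.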
| 72 | '''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
SCREAMING_SNAKE_CASE_: Any =False
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class __A :
def __init__(self : int , __a : str = None , __a : list = [] ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = choices
UpperCAmelCase_ = prompt
if sys.platform == "win32":
UpperCAmelCase_ = "*"
else:
UpperCAmelCase_ = "➔ "
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __a )
else:
forceWrite(self.choices[index] , __a )
def _lowercase (self : Any , __a : int ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ):
UpperCAmelCase_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a )
move_cursor(__a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _lowercase (self : Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _lowercase (self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _lowercase (self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _lowercase (self : str ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = int(chr(self.current_selection ) )
UpperCAmelCase_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __a )
else:
return
else:
return
def _lowercase (self : Optional[Any] , __a : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
UpperCAmelCase_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ = int(builtins.input() )
except ValueError:
UpperCAmelCase_ = default_choice
else:
UpperCAmelCase_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
| 78 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
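# Sanity check (the first primes are 2, 3, 5, 7, 11, 13):
#     >>> solution(6)
#     13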
if __name__ == "__main__":
    print(f"{solution() = }")
| 73 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_: Optional[int] ={'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[int] =['BeitFeatureExtractor']
SCREAMING_SNAKE_CASE_: int =['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Optional[int] =[
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
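# Worked values for edge = 1, computed by hand from the closed forms above:
#     >>> round(dodecahedron_surface_area(1), 4)
#     20.6457
#     >>> round(dodecahedron_volume(1), 4)
#     7.6631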
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 74 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
SCREAMING_SNAKE_CASE_: Any ={
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase_ ( snake_case_ : Any ) -> str:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase_ = False
elif args.student_type == "gpt2":
UpperCAmelCase_ = False
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple:
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase_ = False
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
UpperCAmelCase_ = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ )
UpperCAmelCase_ = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase_ = special_tok_ids
UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase_ = 0.0 # do not predict special tokens
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase_ = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
UpperCAmelCase_ = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase_ = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 78 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = JukeboxTokenizer
lowerCAmelCase__ = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Optional[int] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
UpperCAmelCase__ : List[Any] = tokenizer(**self.metas )['''input_ids''']
# fmt: off
UpperCAmelCase__ : List[str] = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 75 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01,
                    1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
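    # Note (added for clarity): the SD VAE encoder downsamples each spatial
    # dimension by a factor of 8 and emits 4 latent channels, so a
    # (N, 3, 512, 512) image batch maps to (N, 4, 64, 64) latents -- exactly
    # the shape asserted above.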
| 78 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
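# Illustration (added, not part of the original module): if note_seq is not
# installed, touching the placeholder fails fast with a helpful error instead
# of an opaque ImportError at import time:
#
#     MidiProcessor()  # raises ImportError mentioning the "note_seq" backend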
| 76 | '''simple docstring'''
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
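if __name__ == "__main__":
    # Minimal usage sketch (added; values are illustrative only): build a
    # config and read back one of the stored hyper-parameters.
    config = BertAbsConfig(vocab_size=30522, dec_layers=6)
    print(config.model_type, config.dec_layers)  # -> bertabs 6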
| 78 | 0 |
"""simple docstring"""
import sys


def matrix_chain_order(array):
    """Return the DP cost table and split table for the matrix-chain product."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
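# Worked example (the classic CLRS Section 15.2 instance used by main()):
# for dimensions [30, 35, 15, 5, 10, 20, 25] the DP yields a minimum of
# 15125 scalar multiplications with parenthesization ((A1(A2A3))((A4A5)A6)),
# so the program prints "No. of Operation required: 15125".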
| 77 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )

    return config
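# Hedged sketch of the ViT-hybrid variant mentioned in the comment above
# (attribute names are taken from that comment and are not verified here):
#
#     config = BitConfig(
#         conv_layer="std_conv_same",
#         layer_type="bottleneck",
#         stem_type="same",
#         num_labels=1000,
#         id2label=id2label,
#         label2id=label2id,
#     )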
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
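# Worked examples of the key mapping above:
#     rename_key("stem.conv.weight")      -> "bit.embedder.convolution.weight"
#     rename_key("head.fc.weight")        -> "classifier.1.weight"
#     rename_key("blocks.0.conv1.weight") -> "bit.encoder.layers.0.conv1.weight"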
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
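# Example invocation (the script file name is an assumption):
#     python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#         --pytorch_dump_folder_path ./bit-dump --push_to_hub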
| 78 | 0 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of ``plain_text``."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
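# Quick sanity check: the Adler-32 checksum of "Wikipedia" is the well-known
# reference value 0x11E60398 (decimal 300286872).
if __name__ == "__main__":
    assert adler32("Wikipedia") == 300286872
    print(hex(adler32("Wikipedia")))  # -> 0x11e60398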
| 79 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 78 | 0 |
import math
def sieve(n):
    """Segmented sieve: return all primes up to (and including) ``n``."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
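# Example: for a small bound the segmented sieve agrees with the classic
# sieve, e.g. sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].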
| 80 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
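# Usage sketch (hedged: mirrors the public `datasets` API for this feature):
#     from datasets import Dataset
#     ds = Dataset.from_dict({"audio": ["path/to/file.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16000))
#     ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}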
| 78 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
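# Usage note (illustrative sketch): constructing the deprecated class still
# yields a working PoolFormerImageProcessor subclass, but emits a
# FutureWarning steering callers to the new name:
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         extractor = PoolFormerFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)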
| 81 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 78 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range, down_ops=self.down_ops,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )
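    # Worked example of the shrinkage above (tester defaults: image_size=64,
    # kernel_size=3, stride=2, padding=1): each of the four embedding convs
    # maps h -> floor((h + 2*1 - 3) / 2 + 1), i.e. 64 -> 32 -> 16 -> 8 -> 4.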
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
    def test_model_common_attributes(self):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
    def test_attention_outputs(self):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase_ = problem_type["title"]
UpperCAmelCase_ = problem_type["num_labels"]
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img ( ):
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
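# A minimal, self-contained sketch of the spatial-size recurrence exercised by the
# hidden-states check above: a strided convolution maps a side of length n to
# floor((n + 2*padding - kernel_size) / stride) + 1. The numbers below are
# illustrative only, not taken from the LeViT configuration.
from math import floor

def conv_output_size(n: int, kernel_size: int, stride: int, padding: int) -> int:
    # standard output-length formula for a strided convolution
    return floor((n + 2 * padding - kernel_size) / stride) + 1

assert conv_output_size(224, kernel_size=3, stride=2, padding=1) == 112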
| 82 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase_ = []
for i in range(snake_case_ ):
UpperCAmelCase_ = i / num_diffusion_timesteps
UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
return torch.tensor(snake_case_ , dtype=torch.floataa )
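# A cleaned-up sketch of the cosine branch of betas_for_alpha_bar above, assuming
# float32 is the intended dtype; the "exp" branch would swap in math.exp(t * -12.0).
import math
import torch

def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), clipped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)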
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
a__ : Optional[Any] = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def index_for_timestep (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def set_timesteps (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _sigma_to_t (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
def _convert_to_karras (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def state_in_first_order (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
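# Heun's method in isolation, stripped of the scheduler bookkeeping above: take an
# Euler predictor step, re-evaluate the slope there, and average the two slopes.
# This is a generic ODE sketch, not the diffusers API.
def heun_step(f, x, t, dt):
    k1 = f(x, t)                   # slope at the current point (first-order state)
    x_pred = x + dt * k1           # Euler predictor
    k2 = f(x_pred, t + dt)         # slope at the predicted point (second-order state)
    return x + dt * (k1 + k2) / 2  # trapezoidal corrector

# e.g. one step of dx/dt = -x from x = 1.0
print(heun_step(lambda x, t: -x, 1.0, 0.0, 0.1))  # ~0.905, vs exp(-0.1) ~ 0.9048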
| 78 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
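# The _import_structure pattern above defers heavy imports until an attribute is first
# touched. A minimal sketch of the same idea for a package __init__.py, using PEP 562's
# module-level __getattr__; the module and attribute names here are hypothetical.
import importlib
from typing import Any

_SUBMODULES = {"tokenization": ["MyTokenizer"], "modeling": ["MyModel"]}
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _SUBMODULES.items() for attr in attrs}

def __getattr__(name: str) -> Any:
    module_name = _ATTR_TO_MODULE.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # import the submodule lazily, only when one of its attributes is requested
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)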
| 83 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( UpperCamelCase__ ):
a__ : List[str] = """Salesforce/blip-image-captioning-base"""
a__ : Optional[Any] = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
a__ : str = """image_captioner"""
a__ : List[str] = AutoModelForVisionaSeq
a__ : int = ["""image"""]
a__ : Optional[Any] = ["""text"""]
def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*__a , **__a )
def _lowercase (self : Union[str, Any] , __a : "Image" ):
return self.pre_processor(images=__a , return_tensors="pt" )
def _lowercase (self : List[str] , __a : Dict ):
return self.model.generate(**__a )
def _lowercase (self : int , __a : Optional[Any] ):
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
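# The tool above wraps a BLIP checkpoint; the same model is reachable through the
# high-level pipeline API. A sketch assuming a transformers release that ships the
# "image-to-text" pipeline task; the file name is hypothetical.
from PIL import Image
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
image = Image.open("photo.jpg")
print(captioner(image)[0]["generated_text"])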
| 78 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Dict = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **kwargs ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
positive_arg = deprecated_arg[3:]
kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
logger.warning(
F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
self.tpu_name = kwargs.pop('tpu_name' , self.tpu_name )
self.device_idx = kwargs.pop('device_idx' , self.device_idx )
self.eager_mode = kwargs.pop('eager_mode' , self.eager_mode )
self.use_xla = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**kwargs )
_UpperCamelCase : str = field(
default=__lowerCamelCase , metadata={"""help""": """Name of TPU"""} , )
_UpperCamelCase : int = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
_UpperCamelCase : bool = field(default=__lowerCamelCase , metadata={"""help""": """Benchmark models in eager model."""} )
_UpperCamelCase : bool = field(
default=__lowerCamelCase , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
requires_backends(self , ['tf'] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
lowercase = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def SCREAMING_SNAKE_CASE__ ( self ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def SCREAMING_SNAKE_CASE__ ( self ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.n_gpu > 0
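# The deprecated-flag remapping in __init__ above inverts legacy "no_*" switches into
# their positive counterparts. A standalone sketch of the same transformation:
DEPRECATED = ["no_inference", "no_cuda", "no_tpu"]

def remap_deprecated(kwargs: dict) -> dict:
    for old in DEPRECATED:
        if old in kwargs:
            # "no_cuda=True" becomes "cuda=False", and vice versa
            kwargs[old[3:]] = not kwargs.pop(old)
    return kwargs

assert remap_deprecated({"no_cuda": True, "memory": True}) == {"cuda": False, "memory": True}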
| 84 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCAmelCase_ = []
if isinstance(snake_case_ , snake_case_ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _flat_idx_to_idx ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCAmelCase_ = []
for d in reversed(snake_case_ ):
idx.append(flat_idx % d )
UpperCAmelCase_ = flat_idx // d
return tuple(reversed(snake_case_ ) )
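# _flat_idx_to_idx above converts a flat index back into a multi-index by peeling off
# the trailing dimensions. A readable sketch, checked against numpy's unravel_index:
import numpy as np

def flat_idx_to_idx(flat_idx: int, dims: tuple) -> tuple:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx //= d
    return tuple(reversed(idx))

assert flat_idx_to_idx(7, (2, 3, 4)) == tuple(np.unravel_index(7, (2, 3, 4)))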
@torch.jit.ignore
def _get_minimal_slice_set ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(snake_case_ : List[bool] ) -> None:
UpperCAmelCase_ = True
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_ , snake_case_ ):
if s == e:
path_list.append(slice(snake_case_ , s + 1 ) )
else:
break
UpperCAmelCase_ = tuple(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = start[divergence_idx]
return tuple(
path + (slice(snake_case_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = end[divergence_idx]
return tuple(
path + (slice(snake_case_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase_ = t.shape[:no_batch_dims]
UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ = _get_minimal_slice_set(
snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
if not (len(snake_case_ ) > 0):
raise ValueError("Must provide at least one input" )
UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ )
UpperCAmelCase_ = None
if _out is not None:
UpperCAmelCase_ = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ = 0
UpperCAmelCase_ = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ = _select_chunk
else:
UpperCAmelCase_ = partial(
_chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , )
UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ )
# Run the layer on the chunk
UpperCAmelCase_ = layer(**snake_case_ )
# Allocate space for the output
if out is None:
UpperCAmelCase_ = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_ , snake_case_ ):
def assign(d1 : dict , d2 : dict ) -> None:
for k, v in d1.items():
if isinstance(v , dict ):
assign(v , d2[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += d2[k]
else:
v[i : i + chunk_size] = d2[k]
assign(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
for xa, xb in zip(snake_case_ , snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xb
else:
xa[i : i + chunk_size] = xb
elif isinstance(snake_case_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
UpperCAmelCase_ = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ )
return out
class __A :
def __init__(self : Dict , max_chunk_size : int = 512 , ):
self.max_chunk_size = max_chunk_size
self.cached_chunk_size = None
self.cached_arg_data = None
def _determine_favorable_chunk_size (self : List[Any] , fn : Callable , args : tuple , min_chunk_size : int ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(chunk_size : int ) -> bool:
try:
with torch.no_grad():
fn(*args , chunk_size=chunk_size )
return True
except RuntimeError:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ = i
UpperCAmelCase_ = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _compare_arg_caches (self : int , aca : Iterable , acb : Iterable ):
consistent = True
for arg_a, arg_b in zip(aca , acb ):
assert type(arg_a ) == type(arg_b )
if isinstance(arg_a , (list, tuple) ):
consistent &= self._compare_arg_caches(arg_a , arg_b )
elif isinstance(arg_a , dict ):
arg_a_items = [v for _, v in sorted(arg_a.items() , key=lambda x : x[0] )]
arg_b_items = [v for _, v in sorted(arg_b.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(arg_a_items , arg_b_items )
else:
consistent &= arg_a == arg_b
return consistent
def tune_chunk_size (self : List[str] , representative_fn : Callable , args : tuple , min_chunk_size : int , ):
consistent = True
arg_data = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(arg_data )
consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
else:
# Otherwise, we can reuse the precomputed value
consistent = False
if not consistent:
self.cached_chunk_size = self._determine_favorable_chunk_size(
representative_fn , args , min_chunk_size , )
self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
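# chunk_layer above flattens the batch dims, runs the layer slice by slice, and
# stitches the output back together. The core loop, minus tree handling and the
# low-memory path, reduces to this sketch:
import torch

def chunk_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # apply fn over slices of the leading dim, then concatenate the results
    outs = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)

x = torch.randn(10, 4)
assert torch.allclose(chunk_apply(torch.relu, x, chunk_size=3), torch.relu(x))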
| 78 | 0 |
import requests
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "YOUR API KEY"
def get_gifs ( query : str , api_key : str = giphy_api_key ):
'''simple docstring'''
formatted_query = '+'.join(query.split() )
url = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
gifs = requests.get(url ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 85 | '''simple docstring'''
import copy
import re
class __A :
a__ : Optional[int] = """hp"""
a__ : Optional[Any] = {}
a__ : List[Any] = None
@classmethod
def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ):
UpperCAmelCase_ = prefix
UpperCAmelCase_ = defaults
cls.build_naming_info()
@staticmethod
def shortname_for_word (__a : List[Any] , __a : List[str] ):
if len(__a ) == 0:
return ""
UpperCAmelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
UpperCAmelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__a : Union[str, Any] ):
UpperCAmelCase_ = ""
while integer != 0:
UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ = 0
while True:
UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = sword
break
UpperCAmelCase_ = short_word
UpperCAmelCase_ = word
return short_word
@staticmethod
def shortname_for_key (__a : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = param_name.split("_" )
UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCAmelCase_ = ["", "_"]
for separator in separators:
UpperCAmelCase_ = separator.join(__a )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ = shortname
UpperCAmelCase_ = param_name
return shortname
return param_name
@staticmethod
def add_new_param_name (__a : int , __a : Union[str, Any] ):
UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a )
UpperCAmelCase_ = short_name
UpperCAmelCase_ = param_name
@classmethod
def build_naming_info (cls : Any ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
UpperCAmelCase_ = info
@classmethod
def _lowercase (cls : int , __a : Optional[int] ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(__a , __a ):
UpperCAmelCase_ = 1 if v else 0
UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-"
UpperCAmelCase_ = f"""{key}{sep}{v}"""
name.append(__a )
return "_".join(__a )
@classmethod
def _lowercase (cls : Dict , __a : Dict ):
UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = repr.split("_" )
UpperCAmelCase_ = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
else:
UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a )
UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) )
UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCAmelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ = cls.DEFAULTS[k]
return parameters
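# The shortnamer above compresses a hyperparameter dict into a compact, reversible run
# name, dropping values that match the defaults. A toy sketch of the encoding half,
# with hypothetical parameter names:
DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
SHORT = {"learning_rate": "lr", "batch_size": "bs"}

def shortname(params: dict, prefix: str = "hp") -> str:
    parts = [prefix]
    for k, v in params.items():
        if v != DEFAULTS[k]:  # defaults are omitted from the name
            sep = "" if isinstance(v, (int, float)) else "-"
            parts.append(f"{SHORT[k]}{sep}{v}")
    return "_".join(parts)

assert shortname({"learning_rate": 1e-3, "batch_size": 64}) == "hp_bs64"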
| 78 | 0 |
from collections import deque
def tarjan ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = len(__UpperCamelCase )
A_ = deque()
A_ = [False for _ in range(__UpperCamelCase )]
A_ = [-1 for _ in range(__UpperCamelCase )]
A_ = index_of[:]
def strong_connect(__UpperCamelCase : List[str] ,__UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ):
A_ = index # the number when this node is seen
A_ = index # lowest rank node reachable from here
index += 1
stack.append(__UpperCamelCase )
A_ = True
for w in g[v]:
if index_of[w] == -1:
A_ = strong_connect(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A_ = []
A_ = stack.pop()
A_ = False
component.append(__UpperCamelCase )
while w != v:
A_ = stack.pop()
A_ = False
component.append(__UpperCamelCase )
components.append(__UpperCamelCase )
return index
A_ = []
for v in range(__UpperCamelCase ):
if index_of[v] == -1:
strong_connect(__UpperCamelCase ,0 ,__UpperCamelCase )
return components
def create_graph ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = [[] for _ in range(__UpperCamelCase )]
for u, v in edges:
g[u].append(__UpperCamelCase )
return g
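# A common follow-up to tarjan() is collapsing each strongly connected component into
# a single node, yielding the condensation DAG. A sketch building on the output format
# above (components as lists of vertices, edges as (u, v) pairs):
def condensation(components, edges):
    comp_of = {v: ci for ci, comp in enumerate(components) for v in comp}
    dag = {ci: set() for ci in range(len(components))}
    for u, v in edges:
        if comp_of[u] != comp_of[v]:
            dag[comp_of[u]].add(comp_of[v])
    return dag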
if __name__ == "__main__":
# Test
__a :Optional[int] = 7
__a :Optional[int] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__a :str = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__a :str = [(u, v) for u, v in zip(source, target)]
__a :int = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 86 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""pixel_values"""]
def __init__(self : int , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs : int , ):
super().__init__(**kwargs )
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.pad_size = pad_size
def rescale (self : Optional[int] , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int] ):
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def pad (self : Optional[int] , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
old_height, old_width = get_image_size(image )
pad_height = (old_height // size + 1) * size - old_height
pad_width = (old_width // size + 1) * size - old_width
return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
def preprocess (self : Tuple , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs : List[str] , ):
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
pad_size = pad_size if pad_size is not None else self.pad_size
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_pad:
images = [self.pad(image , size=pad_size ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"pixel_values": images}
return BatchFeature(data=data , tensor_type=return_tensors )
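# The pad helper above always grows each side to the next multiple of `size`, even when
# it is already aligned, because (old // size + 1) * size - old is a full block at exact
# multiples. A sketch of a pad-only-when-needed variant in plain numpy; this is an
# alternative, not the processor's behavior:
import numpy as np

def pad_to_multiple(image: np.ndarray, size: int) -> np.ndarray:
    h, w = image.shape[:2]
    pad_h = (size - h % size) % size  # 0 when the side is already a multiple
    pad_w = (size - w % size) % size
    return np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")

assert pad_to_multiple(np.zeros((10, 17)), 8).shape == (16, 24)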
| 78 | 0 |
_lowerCamelCase : Union[str, Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase : Dict = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 87 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0."
SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}."
SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd ( snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict ( snake_case_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_vaa ( snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
UpperCAmelCase_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
UpperCAmelCase_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
return new_state_dict
def convert_text_enc_state_dict ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
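# Every conversion above is driven by lists of (stable-diffusion, HF Diffusers) name
# pairs applied with str.replace. The mechanism in isolation:
def rename_keys(state_dict: dict, pairs: list) -> dict:
    out = {}
    for k, v in state_dict.items():
        for sd_part, hf_part in pairs:
            # map HF Diffusers naming back to stable-diffusion naming
            k = k.replace(hf_part, sd_part)
        out[k] = v
    return out

pairs = [("in_layers.0", "norm1"), ("in_layers.2", "conv1")]
sd = {"block.norm1.weight": 0, "block.conv1.weight": 1}
assert rename_keys(sd, pairs) == {"block.in_layers.0.weight": 0, "block.in_layers.2.weight": 1}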
| 78 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
UpperCAmelCase = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
UpperCAmelCase = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
UpperCAmelCase = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""")),
"""references""": datasets.Sequence(datasets.Value("""int32""")),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32"""),
"""references""": datasets.Value("""int32"""),
}) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE="binary" , SCREAMING_SNAKE_CASE=None) -> Optional[int]:
_lowerCamelCase : List[Any] = fa_score(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , pos_label=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE , sample_weight=SCREAMING_SNAKE_CASE)
return {"f1": float(SCREAMING_SNAKE_CASE) if score.size == 1 else score}
| 88 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase_ ( snake_case_ : ndarray ) -> float:
'''simple docstring'''
return np.dot(snake_case_ , snake_case_ )
class __A :
def __init__(self : int , *,
__a : float = np.inf , __a : str = "linear" , __a : float = 0.0 , ):
UpperCAmelCase_ = regularization
UpperCAmelCase_ = gamma
if kernel == "linear":
UpperCAmelCase_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
UpperCAmelCase_ = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn default: gamma = 1 / (n_features * X.var())
# previously it was 1/(n_features)
else:
UpperCAmelCase_ = f"""Unknown kernel: {kernel}"""
raise ValueError(__a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.dot(__a , __a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def _lowercase (self : str , __a : list[ndarray] , __a : ndarray ):
UpperCAmelCase_ = observations
UpperCAmelCase_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase_) , ) = np.shape(__a )
def to_minimize(__a : ndarray ) -> float:
UpperCAmelCase_ = 0
((UpperCAmelCase_) , ) = np.shape(__a )
for i in range(__a ):
for j in range(__a ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__a )
UpperCAmelCase_ = LinearConstraint(__a , 0 , 0 )
UpperCAmelCase_ = Bounds(0 , self.regularization )
UpperCAmelCase_ = minimize(
            __a , np.ones(__a ) , bounds=__a , constraints=[ly_constraint] ).x
UpperCAmelCase_ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase_ = 0
for i in range(__a ):
for j in range(__a ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCAmelCase_ = s / n
def _lowercase (self : Optional[int] , __a : ndarray ):
UpperCAmelCase_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __a )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
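# Illustrative sketch of the two kernels the SVC above supports, written as
# standalone functions (hypothetical names): the RBF kernel is
# k(x, y) = exp(-gamma * ||x - y||^2), which is what the class computes
# through its squared-norm helper.
def linear_kernel(x: np.ndarray, y: np.ndarray) -> float:
    return float(np.dot(x, y))
def rbf_kernel(x: np.ndarray, y: np.ndarray, gamma: float = 1.0) -> float:
    diff = x - y
    return float(np.exp(-gamma * np.dot(diff, diff)))
assert rbf_kernel(np.array([1.0, 0.0]), np.array([1.0, 0.0])) == 1.0  # identical points
assert rbf_kernel(np.array([0.0, 0.0]), np.array([10.0, 0.0])) < 1e-4  # distant points decay toward 0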
| 78 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[int] = set()
_lowercase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowercase : Optional[Any] = char
_lowercase : Union[str, Any] = set(lowerCamelCase_ )
return pairs
class _lowerCamelCase( _a ):
lowercase_ : Any = VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", **lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, **lowerCamelCase, )
_lowercase : str = vocab_file
_lowercase : Any = merges_file
_lowercase : Optional[Any] = {}
_lowercase : Tuple = 0
_lowercase : List[Any] = 1
_lowercase : List[str] = 2
_lowercase : List[str] = 3
self.add_from_file(lowerCamelCase)
_lowercase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(lowerCamelCase, encoding='utf-8') as merges_handle:
_lowercase : Union[str, Any] = merges_handle.read().split('\n')[:-1]
_lowercase : Any = [tuple(merge.split()[:-1]) for merge in merges]
_lowercase : List[Any] = dict(zip(lowerCamelCase, range(len(lowerCamelCase))))
_lowercase : int = {}
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : Union[str, Any] = [self.cls_token_id]
_lowercase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase)) + [1]
return [1] + ([0] * len(lowerCamelCase)) + [1, 1] + ([0] * len(lowerCamelCase)) + [1]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : List[Any] = [self.sep_token_id]
_lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return len(self.encoder)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder)
def UpperCamelCase ( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowercase : List[Any] = tuple(lowerCamelCase)
_lowercase : Dict = tuple(list(word[:-1]) + [word[-1] + '</w>'])
_lowercase : str = get_pairs(lowerCamelCase)
if not pairs:
return token
while True:
_lowercase : int = min(lowerCamelCase, key=lambda lowerCamelCase: self.bpe_ranks.get(lowerCamelCase, float('inf')))
if bigram not in self.bpe_ranks:
break
_lowercase , _lowercase : Dict = bigram
_lowercase : Optional[int] = []
_lowercase : Optional[int] = 0
while i < len(lowerCamelCase):
try:
_lowercase : Optional[Any] = word.index(lowerCamelCase, lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowercase : Any = j
if word[i] == first and i < len(lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowercase : str = tuple(lowerCamelCase)
_lowercase : Tuple = new_word
if len(lowerCamelCase) == 1:
break
else:
_lowercase : Tuple = get_pairs(lowerCamelCase)
_lowercase : Union[str, Any] = '@@ '.join(lowerCamelCase)
_lowercase : str = word[:-4]
_lowercase : int = word
return word
def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = []
_lowercase : List[Any] = re.findall(R'\S+\n?', lowerCamelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase).split(' ')))
return split_tokens
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token))
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
return self.decoder.get(lowerCamelCase, self.unk_token)
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = ' '.join(lowerCamelCase).replace('@@ ', '').strip()
return out_string
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Optional[int] = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
_lowercase : str = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase):
copyfile(self.vocab_file, lowerCamelCase)
if os.path.abspath(self.merges_file) != os.path.abspath(lowerCamelCase):
copyfile(self.merges_file, lowerCamelCase)
return out_vocab_file, out_merge_file
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase):
try:
with open(lowerCamelCase, 'r', encoding='utf-8') as fd:
self.add_from_file(lowerCamelCase)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''')
return
_lowercase : Dict = f.readlines()
for lineTmp in lines:
_lowercase : int = lineTmp.strip()
_lowercase : Any = line.rfind(' ')
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'')
_lowercase : Optional[int] = line[:idx]
_lowercase : Tuple = len(self.encoder)
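# Illustrative sketch (standalone, with a hypothetical merge table) of one
# round of the BPE loop in the bpe() method above: the adjacent symbol pair
# with the lowest merge rank is fused until no known pair remains.
def bpe_merge_once(word: tuple, ranks: dict) -> tuple:
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    known = [p for p in pairs if p in ranks]
    if not known:
        return word
    first, second = min(known, key=ranks.get)
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            out.append(first + second)  # fuse the best-ranked pair
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)
ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical ranks from a bpe.codes file
assert bpe_merge_once(bpe_merge_once(("l", "o", "w", "</w>"), ranks), ranks) == ("low", "</w>")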
| 89 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """perceiver"""
def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ):
super().__init__(**__a )
UpperCAmelCase_ = num_latents
UpperCAmelCase_ = d_latents
UpperCAmelCase_ = d_model
UpperCAmelCase_ = num_blocks
UpperCAmelCase_ = num_self_attends_per_block
UpperCAmelCase_ = num_self_attention_heads
UpperCAmelCase_ = num_cross_attention_heads
UpperCAmelCase_ = qk_channels
UpperCAmelCase_ = v_channels
UpperCAmelCase_ = cross_attention_shape_for_attention
UpperCAmelCase_ = self_attention_widening_factor
UpperCAmelCase_ = cross_attention_widening_factor
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_query_residual
# masked language modeling attributes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
# image classification attributes
UpperCAmelCase_ = image_size
# flow attributes
UpperCAmelCase_ = train_size
# multimodal autoencoding attributes
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = audio_samples_per_frame
UpperCAmelCase_ = samples_per_patch
UpperCAmelCase_ = output_shape
class __A ( UpperCamelCase__ ):
@property
def _lowercase (self : Dict ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def _lowercase (self : Optional[Any] ):
return 1E-4
def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("input_ids" )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 78 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( A ) -> Any:
lowerCAmelCase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A , A )
def _snake_case ( A ) -> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ = emb.weight.shape
lowerCAmelCase__ = nn.Linear(A , A , bias=A )
lowerCAmelCase__ = emb.weight.data
return lin_layer
def _snake_case ( A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> int:
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A )
lowerCAmelCase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCAmelCase__ = MBartConfig.from_pretrained(A , vocab_size=A )
if mbart_aa and finetuned:
lowerCAmelCase__ = '''relu'''
lowerCAmelCase__ = state_dict['''decoder.embed_tokens.weight''']
lowerCAmelCase__ = MBartForConditionalGeneration(A )
model.model.load_state_dict(A )
if finetuned:
lowerCAmelCase__ = make_linear_from_emb(model.model.shared )
return model
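# Illustrative sketch of make_linear_from_emb above (hypothetical sizes): the
# LM head becomes a bias-free Linear sharing the embedding matrix, so output
# logits are tied to the input token embeddings.
def _tied_lm_head_demo():
    emb = nn.Embedding(10, 4)  # vocab_size=10, d_model=4
    vocab_size, emb_size = emb.weight.shape
    head = nn.Linear(emb_size, vocab_size, bias=False)
    head.weight.data = emb.weight.data  # the two modules now share parameters
    logits = head(torch.randn(1, 4))
    assert logits.shape == (1, 10)  # one score per vocabulary entry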
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 90 | '''simple docstring'''
import requests
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> None:
'''simple docstring'''
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(snake_case_ , json={"text": message_body} , headers=snake_case_ )
if response.status_code != 2_00:
UpperCAmelCase_ = (
"Request to slack returned an error "
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = scope
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]:
A = LlamaModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]:
A = True
A = LlamaModel(A_ )
model.to(A_ )
model.eval()
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,)
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,)
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]:
A = LlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]:
A = True
A = True
A = LlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,)
A = outputs.past_key_values
        # create several hypothetical next tokens and extend next_input_ids with them
A = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
A = torch.cat([input_ids, next_tokens] ,dim=-1 )
A = torch.cat([input_mask, next_mask] ,dim=-1 )
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0]
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0]
# select random slice
A = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A = output_from_no_past[:, -3:, random_slice_idx].detach()
A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
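# Illustrative sketch (plain tensors, no model) of what the past_key_values
# test above relies on: projecting only the newest token and concatenating
# with cached past keys/values equals projecting the full sequence at once.
def _kv_cache_demo():
    torch.manual_seed(0)
    w_k, w_v = torch.randn(8, 8), torch.randn(8, 8)
    x = torch.randn(1, 6, 8)  # 5 cached tokens + 1 new token
    k_full, v_full = x @ w_k, x @ w_v  # no cache: recompute everything
    k_cached = torch.cat([x[:, :5] @ w_k, x[:, 5:] @ w_k], dim=1)
    v_cached = torch.cat([x[:, :5] @ w_v, x[:, 5:] @ w_v], dim=1)
    assert torch.allclose(k_full, k_cached) and torch.allclose(v_full, v_cached)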
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_lowerCamelCase: Any = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase: int = False
_lowerCamelCase: List[str] = False
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = LlamaModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A = type
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = input_dict['input_ids']
A = input_ids.ne(1 ).to(A_ )
A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = 'single_label_classification'
A = input_dict['input_ids']
A = input_ids.ne(1 ).to(A_ )
A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = 'multi_label_classification'
A = input_dict['input_ids']
A = input_ids.ne(1 ).to(A_ )
A = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = ids_tensor([1, 10] ,config.vocab_size )
A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A = LlamaModel(A_ )
original_model.to(A_ )
original_model.eval()
A = original_model(A_ ).last_hidden_state
A = original_model(A_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A = {'type': scaling_type, 'factor': 10.0}
A = LlamaModel(A_ )
scaled_model.to(A_ )
scaled_model.eval()
A = scaled_model(A_ ).last_hidden_state
A = scaled_model(A_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) )
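# Illustrative sketch of the "linear" RoPE scaling exercised above: positions
# are divided by the scaling factor before the rotary angles are computed, so
# a longer context reuses the angle range the model was trained on. The dim
# and base constants below are illustrative assumptions, not model values.
def _linear_rope_angles(position: int, dim: int = 8, base: float = 10000.0, factor: float = 10.0):
    inv_freq = [base ** (-2 * i / dim) for i in range(dim // 2)]
    return [(position / factor) * f for f in inv_freq]
# position 100 at factor 10 sees the same angles as position 10 unscaled
assert _linear_rope_angles(100, factor=10.0) == _linear_rope_angles(10, factor=1.0)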
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
A = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
A = model(torch.tensor(A_ ) )
# Expected mean on dim = -1
A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
A = model(torch.tensor(A_ ) )
# Expected mean on dim = -1
A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
A = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
A = model(torch.tensor(A_ ) )
A = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
A = 'Simply put, the theory of relativity states that '
A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
A = tokenizer.encode(A_ ,return_tensors='pt' )
A = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ )
# greedy generation outputs
A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ )
A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ )
        self.assertEqual(A_ ,A_ )
| 91 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase (self : int ):
self.enable_attention_slicing(__a )
def _lowercase (self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase (self : Optional[int] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ):
UpperCAmelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCAmelCase_ = self.segmentation_model(**__a )
UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
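# Illustrative sketch (hypothetical logit values) of the mask step in __call__
# above: segmentation logits are squashed through a sigmoid into [0, 1], and
# high-probability pixels become the region the inpainting pipeline repaints.
def _logits_to_mask_demo():
    logits = torch.tensor([[-4.0, 0.0], [2.0, 6.0]])  # pretend CLIPSeg output
    mask = torch.sigmoid(logits)
    assert mask[0, 0] < 0.05 and mask[1, 1] > 0.95  # background vs. queried object
    return mask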
| 78 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> list[list[int]]:
lowercase : list[list[int]] =[]
create_all_state(1 , __magic_name__ , __magic_name__ , [] , __magic_name__ )
return result
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : list[list[int]] , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__magic_name__ , total_number - level + 2 ):
current_list.append(__magic_name__ )
create_all_state(i + 1 , __magic_name__ , level - 1 , __magic_name__ , __magic_name__ )
current_list.pop()
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> None:
for i in total_list:
print(*__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = 4
UpperCamelCase_ = 2
UpperCamelCase_ = generate_all_combinations(n, k)
print_all_state(total_list)
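# Cross-check sketch, following the variable names used in the example above:
# the backtracking enumerates the same k-subsets of 1..n as
# itertools.combinations, in the same lexicographic order.
from itertools import combinations
assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]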
| 92 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
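# Worked illustration of the bit trick above: a power of two has exactly one
# set bit; subtracting 1 flips that bit and sets all lower bits, so the AND
# of the two values is zero.
assert 0b1000 & 0b0111 == 0  # 8 & 7: power of two
assert 0b0110 & 0b0101 != 0  # 6 & 5: not a power of two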
| 78 | 0 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->tuple[int, int]:
"""simple docstring"""
try:
lowerCAmelCase__ :str = float(_SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError('Please enter a valid number' )
lowerCAmelCase__ :Optional[int] = decimal - int(_SCREAMING_SNAKE_CASE )
if fractional_part == 0:
return int(_SCREAMING_SNAKE_CASE ), 1
else:
lowerCAmelCase__ :List[Any] = len(str(_SCREAMING_SNAKE_CASE ).split('.' )[1] )
lowerCAmelCase__ :List[str] = int(decimal * (10**number_of_frac_digits) )
lowerCAmelCase__ :List[str] = 10**number_of_frac_digits
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = denominator, numerator
while True:
lowerCAmelCase__ :Union[str, Any] = dividend % divisor
if remainder == 0:
break
lowerCAmelCase__ , lowerCAmelCase__ :int = divisor, remainder
lowerCAmelCase__ , lowerCAmelCase__ :Any = numerator / divisor, denominator / divisor
return int(_SCREAMING_SNAKE_CASE ), int(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 93 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __A :
a__ : int
a__ : TreeNode | None = None
a__ : TreeNode | None = None
SCREAMING_SNAKE_CASE_: Union[str, Any] =namedtuple('CoinsDistribResult', 'moves excess')
def lowerCAmelCase_ ( snake_case_ : TreeNode | None ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(snake_case_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(snake_case_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(snake_case_ ) != count_coins(snake_case_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(snake_case_ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.left )
UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.right )
UpperCAmelCase_ = 1 - left_distrib_excess
UpperCAmelCase_ = 1 - right_distrib_excess
UpperCAmelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(snake_case_ )
+ abs(snake_case_ )
)
UpperCAmelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(snake_case_ , snake_case_ )
return get_distrib(snake_case_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
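# Worked illustration of the bookkeeping above for a root holding 3 coins with
# two empty leaves: each 0-coin leaf reports excess 0, so the root pushes
# 1 - 0 = 1 coin down each edge, and every coin crossing an edge costs a move.
left_excess = right_excess = 0  # reported by the two 0-coin leaves
coins_to_left, coins_to_right = 1 - left_excess, 1 - right_excess
assert abs(coins_to_left) + abs(coins_to_right) == 2  # minimum number of moves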
| 78 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
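# Illustrative sketch (standard library only, hypothetical class name) of the
# lazy-import pattern _LazyModule implements above: a submodule is imported
# only on first attribute access, keeping the package's top-level import cheap.
import importlib
class _TinyLazyModule:
    def __init__(self, name_to_module):
        self._map = name_to_module
    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])  # deferred import
        return getattr(module, name)
lazy = _TinyLazyModule({"OrderedDict": "collections"})
assert list(lazy.OrderedDict([("a", 1)]).keys()) == ["a"]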
| 94 | '''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: int =logging.getLogger()
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" )
if os.path.exists(snake_case_ ):
with open(snake_case_ , "r" ) as f:
UpperCAmelCase_ = json.load(snake_case_ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
@classmethod
def _lowercase (cls : Any ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase (cls : int ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[Any] ):
    # With so little data, distributed training needs more epochs to reach a score on par with single-GPU runs.
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : str ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(__a )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
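# For reference, the `get_results` helper used throughout these tests reads the
# metrics an example script dumps into its output directory. Its real definition
# lives elsewhere in the test suite; a minimal sketch, assuming the no_trainer
# scripts write an `all_results.json` file (the convention these assertions rely on):
#
#     def get_results(output_dir: str) -> dict:
#         results_path = os.path.join(output_dir, "all_results.json")
#         if not os.path.exists(results_path):
#             raise ValueError(f"Can't find {results_path}")
#         with open(results_path) as f:
#             return json.load(f)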
| 78 | 0 |
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ,A__ ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
UpperCAmelCase_ : Optional[int] = mf_knapsack(i - 1 ,A__ ,A__ ,A__ )
else:
UpperCAmelCase_ : str = max(
mf_knapsack(i - 1 ,A__ ,A__ ,A__ ) ,mf_knapsack(i - 1 ,A__ ,A__ ,j - wt[i - 1] ) + val[i - 1] ,)
UpperCAmelCase_ : int = val
return f[i][j]
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[Any] = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 ,n + 1 ):
for w_ in range(1 ,w + 1 ):
if wt[i - 1] <= w_:
UpperCAmelCase_ : Tuple = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] ,dp[i - 1][w_] )
else:
UpperCAmelCase_ : Optional[int] = dp[i - 1][w_]
return dp[n][w_], dp
def snake_case ( A__ ,A__ ,A__ ):
if not (isinstance(A__ ,(list, tuple) ) and isinstance(A__ ,(list, tuple) )):
raise ValueError(
"Both the weights and values vectors must be either lists or tuples" )
UpperCAmelCase_ : str = len(A__ )
if num_items != len(A__ ):
UpperCAmelCase_ : Tuple = (
"The number of weights must be the same as the number of values.\n"
F"""But got {num_items} weights and {len(A__ )} values"""
)
raise ValueError(A__ )
for i in range(A__ ):
if not isinstance(wt[i] ,A__ ):
UpperCAmelCase_ : str = (
"All weights must be integers but got weight of "
F"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(A__ )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ )
UpperCAmelCase_ : set = set()
_construct_solution(A__ ,A__ ,A__ ,A__ ,A__ )
return optimal_val, example_optional_set
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(A__ ,A__ ,i - 1 ,A__ ,A__ )
else:
optimal_set.add(A__ )
_construct_solution(A__ ,A__ ,i - 1 ,j - wt[i - 1] ,A__ )
if __name__ == "__main__":
lowerCamelCase_ = [3, 2, 4, 4]
lowerCamelCase_ = [4, 3, 2, 3]
lowerCamelCase_ = 4
lowerCamelCase_ = 6
lowerCamelCase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase_ , lowerCamelCase_ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase_ , lowerCamelCase_ = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
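# Not part of the original module: a minimal sketch of the standard space
# optimization for the same 0-1 knapsack recurrence. Because row i only reads
# row i - 1, a single 1-D array suffices if capacities are scanned downwards.
def knapsack_1d(w, wt, val):
    """Return the optimal value using O(w) memory instead of O(n * w)."""
    dp = [0] * (w + 1)
    for weight, value in zip(wt, val):
        # iterate capacities high-to-low so each item is used at most once
        for cap in range(w, weight - 1, -1):
            dp[cap] = max(dp[cap], dp[cap - weight] + value)
    return dp[w]


assert knapsack_1d(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8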
| 95 | '''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to cycle through a list of choices with the arrow keys, falling
    back to numeric input when running inside Google Colab.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
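# A minimal usage sketch (not part of the original module). The concrete key
# handling comes from the `input.register` machinery above, so in a real
# terminal `run()` renders the list and blocks until a choice is confirmed.
if __name__ == "__main__":
    menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
    picked = menu.run(default_choice=0)
    print(f"Selected index: {picked}")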
| 78 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 2
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
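# A minimal wiring sketch (not from the original file): a concrete test class
# mixes `FlaxGenerationTesterMixin` into `unittest.TestCase` and points it at a
# model tester plus the Flax model classes to exercise. `DummyModelTester` and
# `FlaxDummyForCausalLM` are hypothetical stand-ins.
#
# class FlaxDummyGenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
#     all_generative_model_classes = (FlaxDummyForCausalLM,) if is_flax_available() else ()
#
#     def setUp(self):
#         self.model_tester = DummyModelTester(self)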
| 96 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
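# Not part of the original file: a minimal sketch of the lazy-import idea that
# the `_LazyModule` pattern above implements — attribute access triggers the
# real submodule import, so `import transformers.models.beit` stays cheap.
#
# import importlib
# import types
#
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         # maps public attribute name -> submodule that defines it
#         self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#     def __getattr__(self, attr):
#         if attr not in self._attr_to_module:
#             raise AttributeError(attr)
#         module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
#         return getattr(module, attr)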
| 78 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
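# A quick sanity check of the docstring regex (illustration, not in the original
# script): it pulls the (name, link) pair out of a markdown checkpoint reference.
_sample = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert _re_checkpoint.findall(_sample) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]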
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 97 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Sanity-check the CLI arguments before starting a distillation run."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse arguments, load the teacher and student, and launch the distillation."""
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
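# A small illustration (not in the original script) of the MLM smoothing used in
# main(): token counts are raised to a negative power so rare tokens are picked
# for masking more often (word2vec-style subsampling, see XLM).
#
#   >>> counts = np.array([1000, 100, 10, 1])
#   >>> np.maximum(counts, 1) ** -0.7          # mlm_smoothing = 0.7
#   array([0.00794328, 0.03981072, 0.19952623, 1.        ])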
| 78 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a dict mapping utf-8 bytes to printable unicode strings, avoiding
    whitespace/control characters the BPE code chokes on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    """Constructs a BART tokenizer, based on byte-level Byte-Pair-Encoding (GPT-2 style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
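# A toy illustration (not part of the original module) of the byte-level BPE
# pieces above: `bytes_to_unicode` gives every byte a printable stand-in, and
# `get_pairs` produces the merge candidates that `bpe` ranks and applies.
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256 and byte_encoder[ord("A")] == "A"
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}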
| 98 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
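# A minimal encode/decode roundtrip sketch (not one of the original tests),
# assuming only the public AutoencoderKL API exercised above. The random tensor
# stands in for a real image batch.
def vae_roundtrip_demo():
    model = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
    model.eval()
    image = torch.randn(1, 3, 512, 512)  # dummy RGB batch
    with torch.no_grad():
        latents = model.encode(image).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsampling
        reconstruction = model.decode(latents).sample  # back to (1, 3, 512, 512)
    return reconstruction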
| 78 | 0 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """
    Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
    processor.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
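# A minimal usage sketch (illustrative only; "openai/whisper-tiny" is assumed to
# be one of the standard pretrained checkpoints and is not exercised here):
#
#   from transformers import WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcript").input_ids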
| 99 | '''simple docstring'''
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
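# A minimal instantiation sketch (hypothetical override value, for illustration):
#
#   config = BertAbsConfig(dec_layers=8)
#   assert config.enc_hidden_size == 512 and config.dec_layers == 8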
| 78 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
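    # Quick sanity checks of the functions above: the 6th prime is 13, and
    # `is_prime` accepts exactly the primes below 12.
    assert solution(6) == 13
    assert [n for n in range(2, 12) if is_prime(n)] == [2, 3, 5, 7, 11]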
| 100 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
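    # Illustrative `rename_key` mappings (derived from the rules above, not from
    # a real checkpoint):
    assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
    assert rename_key("head.fc.weight") == "classifier.1.weight"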
| 78 | 0 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 101 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
            # Create a dummy config file with image_processor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
        class NewImageProcessor(CustomImageProcessor):
            is_local = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 78 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
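# Worked example for the function above: with height = width = 768 and
# scale_factor = 8, 768 // 64 == 12 with no remainder, so it returns
# (96, 96), i.e. 768 / 8 on each side, matching the MoVQ latent resolution.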
class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
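# A standalone sketch (not part of the pipeline) of the classifier-free guidance
# combination used inside `__call__`, assuming the batch is laid out as
# [unconditional, conditional]; the shapes are hypothetical:
#
#   import torch
#
#   noise_pred = torch.randn(2, 4, 96, 96)  # UNet output for a doubled batch
#   noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
#   guided = noise_pred_uncond + 4.0 * (noise_pred_text - noise_pred_uncond)
#   assert guided.shape == (1, 4, 96, 96)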
| 102 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.float32 ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
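# A minimal standalone sketch (requires `numpy` and `soundfile`) of the
# array -> WAV-bytes conversion that `encode_example` performs for in-memory
# audio; the values are hypothetical:
#
#   from io import BytesIO
#   import numpy as np
#   import soundfile as sf
#
#   array = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
#   buffer = BytesIO()
#   sf.write(buffer, array, 16_000, format="wav")
#   wav_bytes = buffer.getvalue()  # what gets stored under the "bytes" key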
| 78 | 0 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time, in seconds, that a candidate program may run before it is considered failed (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

    total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
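# A small self-check (not part of the metric) that the numerically stable product
# in `estimator` matches the exact unbiased estimator 1 - C(n-c, k) / C(n, k):
#
#   from math import comb
#
#   n, c, k = 10, 3, 2
#   exact = 1.0 - comb(n - c, k) / comb(n, k)
#   stable = 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))
#   assert abs(exact - stable) < 1e-12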
| 103 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 78 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : int = 100 ) -> int:
"""simple docstring"""
A__ = (n * (n + 1) // 2) ** 2
A__ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
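    # Worked example (from the Project Euler problem 6 statement): for n = 10 the
    # square of the sum is 55**2 = 3025, the sum of the squares is 385, and the
    # difference is 2640.
    assert solution(10) == 2640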
| 104 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
'''simple docstring'''
if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
return math.exp(t * -12.0 )
else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
UpperCAmelCase_ = []
for i in range(snake_case_ ):
UpperCAmelCase_ = i / num_diffusion_timesteps
UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
    return torch.tensor(snake_case_ , dtype=torch.float32 )
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.float32 )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
    def init_noise_sigma(self):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.float32 )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
    def _sigma_to_t(self, sigma, log_sigmas):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self : List[str] ):
return self.dt is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
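# A standalone numeric sanity check (not part of the scheduler) of the sigma
# definition used in `set_timesteps`, sigma_t = sqrt((1 - abar_t) / abar_t),
# using the class defaults beta_start=0.00085, beta_end=0.012:
#
#   import torch
#
#   betas = torch.linspace(0.00085, 0.012, 1000, dtype=torch.float32)
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#   sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
#   assert sigmas[0] < sigmas[-1]  # the noise level grows with t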
| 78 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] ,)
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['▁he', 'll', 'o'] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] ,)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
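# A minimal usage sketch for the tokenizer under test (not part of the suite);
# assumes the `transformers` and `sentencepiece` packages and network access for
# `from_pretrained`, using the same checkpoint name as the slow tests above.
if __name__ == "__main__":
    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    pieces = tok.tokenize("SentencePiece splits words into subword units.")
    ids = tok.convert_tokens_to_ids(pieces)
    # the piece <-> id mapping round-trips for in-vocabulary pieces
    assert tok.convert_ids_to_tokens(ids) == pieces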
| 105 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
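# A hedged usage sketch: `PipelineTool.__call__` chains encode -> forward ->
# decode, so captioning a local file (hypothetical path) reduces to one call.
# The first use downloads the BLIP checkpoint named above.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageCaptioningTool()
    print(tool(Image.open("cat.png")))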
| 78 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
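def demo_key_exchange() -> None:
    # Hedged demo (uses only the class above): both parties derive the same
    # SHA-256 shared secret from each other's *public* keys.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b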
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 106 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int):
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
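# A hedged usage sketch for `chunk_layer` above (the function name is restored
# from the obfuscated source): apply a toy layer over a large flat batch in
# slices of `chunk_size` to bound peak memory. Shapes are illustrative only.
def _demo_chunk_layer() -> None:
    def toy_layer(x: torch.Tensor) -> torch.Tensor:
        return x * 2.0

    inputs = {"x": torch.randn(1024, 8)}
    out = chunk_layer(toy_layer, inputs, chunk_size=128, no_batch_dims=1)
    assert torch.allclose(out, inputs["x"] * 2.0)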
| 78 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
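# A hedged sketch: instantiating the config with its defaults. With
# num_block_repeats = [1, 2, 2, 3, 3, 4, 1], the derived depth is
# sum(num_block_repeats) * 4 == 64.
if __name__ == "__main__":
    config = EfficientNetConfig()
    assert config.num_hidden_layers == 64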
| 107 | '''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"""{key}{sep}{v}"""
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
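# A hedged round-trip sketch for the namer above: only non-default parameters
# appear in the short name, and parse_repr inverts shortname (numeric values
# come back as floats). The subclass and its defaults are made up.
class DemoNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 0.001, "batch_size": 32}


def _demo_namer() -> None:
    name = DemoNamer.shortname({"learning_rate": 0.001, "batch_size": 64})
    assert name == "run_bs64"  # learning_rate is at its default, so it is omitted
    assert DemoNamer.parse_repr(name) == {"batch_size": 64.0, "learning_rate": 0.001}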
| 78 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
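def demo_accumulate_loop(accelerator, model, optimizer, dataloader):
    # Hedged sketch of the production pattern exercised by the test above:
    # under `accelerator.accumulate(model)`, gradient sync and effective
    # optimizer steps happen only every `gradient_accumulation_steps`
    # iterations. The batch keys "x"/"y" are assumptions for illustration.
    for batch in dataloader:
        with accelerator.accumulate(model):
            output = model(batch["x"])
            loss = F.mse_loss(output, batch["y"])
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()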
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""", """2.0""") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                """`split_batches=False`, `dispatch_batches=False`**""",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 108 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
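# A hedged numeric check of the padding rule above, using plain numpy: each
# dimension is padded past the *next* multiple of `size`, so an exact multiple
# still gains a full extra block (24 -> 32 below).
if __name__ == "__main__":
    size = 8
    old_height, old_width = 17, 24
    pad_height = (old_height // size + 1) * size - old_height  # 7
    pad_width = (old_width // size + 1) * size - old_width     # 8
    padded = np.pad(np.zeros((old_height, old_width)), ((0, pad_height), (0, pad_width)), mode="symmetric")
    assert padded.shape == (24, 32)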
| 78 | 0 |
'''simple docstring'''
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 109 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    """Convert a 2-D linear weight into the 4-D 1x1-conv layout SD expects."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
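
# Why the reshape above: in Diffusers the VAE mid-block attention q/k/v/proj_out
# layers are nn.Linear (2-D weights), while the original SD VAE implements them
# as 1x1 convolutions (4-D weights), so a (C_out, C_in) tensor must become
# (C_out, C_in, 1, 1) before it can be loaded into the SD checkpoint.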
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single char: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
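
# The concatenation order (q, k, v, via code2idx) matters: the v2.x OpenCLIP
# text encoder stores each attention layer's projections as a single fused
# in_proj tensor, so the three separate matrices must be stacked in exactly
# that order for the converted checkpoint to load.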
def convert_text_enc_state_dict(text_enc_dict):
    # v1 checkpoints: the CLIP text encoder keys already match, nothing to rename
    return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
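
    # Example invocation (script name and paths are illustrative):
    #   python convert_diffusers_to_original_stable_diffusion.py \
    #       --model_path ./my-diffusers-model \
    #       --checkpoint_path ./model.safetensors --half --use_safetensors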
| 78 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
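
    # Example (model identifiers are illustrative):
    #   python consolidate_rag_checkpoint.py --model_type rag_sequence \
    #       --dest ./rag-consolidated \
    #       --generator_name_or_path facebook/bart-large \
    #       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base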
| 110 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm: vector . vector."""
    return np.dot(vector, vector)
class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||vector1 - vector2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
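

# Minimal usage sketch (the data below is illustrative):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))  # expected: 1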
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids

        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def lowerCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def lowerCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
snake_case_ = self.get_tokenizers(fast=__a , do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
snake_case_ = tokenizer.convert_tokens_to_string(__a )
self.assertIsInstance(output["text"] , __a )
| 283 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified

        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 78 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCamelCase__ :int = TypeVar("""T""")
class A( Generic[T] ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :int = data
_UpperCamelCase :Optional[Any] = None
def __str__( self ) -> Optional[Any]:
"""simple docstring"""
return f"{self.data}"
class A( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :int = None
def __iter__( self ) -> str:
"""simple docstring"""
_UpperCamelCase :List[Any] = self.top
while node:
yield node.data
_UpperCamelCase :Dict = node.next
def __str__( self ) -> int:
"""simple docstring"""
return "->".join([str(__a ) for item in self] )
def __len__( self ) -> Dict:
"""simple docstring"""
return len(tuple(iter(self ) ) )
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
return self.top is None
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_UpperCamelCase :int = Node(__a )
if not self.is_empty():
_UpperCamelCase :List[str] = self.top
_UpperCamelCase :Union[str, Any] = node
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , __a )
_UpperCamelCase :List[str] = self.top
_UpperCamelCase :Tuple = self.top.next
return pop_node.data
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[str] = None
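

# Usage sketch:
#   stack = LinkedStack[int]()
#   stack.push(1)
#   stack.push(2)
#   print(stack)   # 2->1  (iteration starts from the top)
#   stack.pop()    # returns 2
#   stack.peek()   # returns 1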
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 | '''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a plain-text message to a Slack incoming-webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
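

# Slack incoming webhooks expect a JSON payload such as {"text": "..."} and
# normally answer HTTP 200 with the body "ok" on success.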
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 135 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to produce a mask for the image region described by `text`
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
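

# Rough usage sketch (instantiation details are an assumption; the pipeline is
# built from the components listed in __init__ rather than a single checkpoint):
#   pipe = TextInpainting(segmentation_model=..., segmentation_processor=...,
#                         vae=..., text_encoder=..., tokenizer=..., unet=...,
#                         scheduler=..., safety_checker=..., feature_extractor=...)
#   result = pipe(prompt="a red couch", image=init_image, text="the old sofa")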
| 78 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 641 | '''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (1, 2, 4, 8, ...)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
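
    # Minimal round-trip sketch (illustrative only, reusing the helpers above; not an
    # extra test): encode an image to a posterior sample, then decode it back with the
    # same pretrained VAE.
    #
    #     model = self.get_sd_vae_model()
    #     image = self.get_sd_image(33)
    #     with torch.no_grad():
    #         latents = model.encode(image).latent_dist.sample(generator=self.get_generator(33))
    #         reconstruction = model.decode(latents).sample  # same shape as `image`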
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to make every node hold exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
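
# Worked example (uses only the definitions above): a root holding all 3 coins with
# two coinless children must send one coin to each child, i.e. two moves:
#
#     root = TreeNode(3, TreeNode(0), TreeNode(0))
#     distribute_coins(root)  # -> 2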
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
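

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): the
    # defaults above describe the van-base layout; any field can be overridden.
    config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
    print(config.model_type)  # -> "van"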
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
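
    # Mechanism note (a sketch of what each test above does, not an extra test): the
    # per-test CLI is appended to the class-level launcher, so every example runs as
    #
    #     run_command(self._launch_args + testargs)
    #     # == accelerate launch --config_file <tmpdir>/default_config.yml <script> <args...>
    #
    # and the metrics are then read back from <output_dir>/all_results.json.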
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
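
# How the lazy pattern above behaves (a sketch, not executed here): names registered
# in _import_structure are resolved on first attribute access rather than at import
# time, e.g.
#
#     import transformers.models.bartpho as bartpho
#     tok_cls = bartpho.BartphoTokenizer  # the real import happens at this line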
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """Interactive bullet-point menu driven by the arrow and number keys."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
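

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): needs an
    # interactive terminal; renders a three-entry menu and prints the chosen index.
    menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
    print(menu.run(default_choice=0))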
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
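
# Minimal usage sketch (illustrative; the column names are invented for the example):
#
#     from datasets import ClassLabel, Features, Value
#     schema = Features({
#         "text": Value("string"),
#         "label": ClassLabel(names=["neg", "pos"]),
#     })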
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
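
# Quick sanity trace (illustrative, not executed here): the substitutions above map
# an original BLIP checkpoint key onto the HF module layout, e.g.
#
#     rename_key("visual_encoder.blocks.0.attn.proj.weight")
#     # -> "vision_model.encoder.layers.0.self_attn.projection.weight"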
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Check the consistency of the CLI arguments before launching distillation."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence."
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa)."
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag."
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction."
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)."
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution."
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only."
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only."
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true."
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, student_tokenizer_class = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("Data loader created." )
# STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
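
# Note on the MLM smoothing used in main() (a worked example, not extra behavior):
# token sampling weights are count ** -mlm_smoothing, so with the default 0.7 a token
# seen 10_000 times gets weight 10_000 ** -0.7 ≈ 0.0016, i.e. roughly 630x less likely
# to be masked than a token seen once (weight 1.0), before special tokens are zeroed.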
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCAmelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
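
    # Reading the fixture above (a sketch, not an extra assertion): each `input_ids`
    # row is [CLS]=2 followed by sentencepiece ids and [SEP]=3, then 0-padding, while
    # `attention_mask` marks the real tokens with 1s — e.g. the third row starts
    # [2, 14, 2231, ...] and ends with ..., 9, 3, 0, 0, ...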
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _lowercase (self : List[Any] ):
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = self.model_class(**__a )
model.to(__a )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ = model(**__a ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
UpperCAmelCase_ = torch.randn_like(__a )
UpperCAmelCase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ = self.model_class(**__a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ = model_a(**__a ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
UpperCAmelCase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
UpperCAmelCase_ = dict(model.named_parameters() )
UpperCAmelCase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _lowercase (self : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__a )
UpperCAmelCase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowercase (self : List[str] ):
UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
UpperCAmelCase_ = model.to(__a )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ = torch.manual_seed(0 )
else:
UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )
UpperCAmelCase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase_ = image.to(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample
UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
UpperCAmelCase_ = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) )
@slow
class __A ( unittest.TestCase ):
def _lowercase (self : Dict , __a : Dict , __a : int ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy"""
def _lowercase (self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ):
UpperCAmelCase_ = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a )
return image
def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ):
UpperCAmelCase_ = "fp16" if fpaa else None
UpperCAmelCase_ = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ = AutoencoderKL.from_pretrained(
__a , subfolder="vae" , torch_dtype=__a , revision=__a , )
model.to(__a ).eval()
return model
def _lowercase (self : List[Any] , __a : List[Any]=0 ):
if torch_device == "mps":
return torch.manual_seed(__a )
return torch.Generator(device=__a ).manual_seed(__a )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Dict , __a : Optional[int] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
with torch.no_grad():
UpperCAmelCase_ = model(__a ).sample
assert sample.shape == image.shape
UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(__a , __a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : int , __a : int , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ = torch.tensor(__a )
assert torch_all_close(__a , __a , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : List[str] , __a : int ):
UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowercase (self : Union[str, Any] , __a : Dict ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ = model.decode(__a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__a , __a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ):
UpperCAmelCase_ = self.get_sd_vae_model()
UpperCAmelCase_ = self.get_sd_image(__a )
UpperCAmelCase_ = self.get_generator(__a )
with torch.no_grad():
UpperCAmelCase_ = model.encode(__a ).latent_dist
UpperCAmelCase_ = dist.sample(generator=__a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ = torch.tensor(__a )
UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(__a , __a , atol=__a )
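# --- illustrative sketch (not part of the test classes above): the encode ->
# sample -> decode round-trip these tests exercise, using only APIs already shown
# in this file. The latent spatial size depends on the VAE's number of down
# blocks (the SD VAE used in the slow tests downsamples by 8).
def _vae_roundtrip_sketch():
    vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
    vae.eval()
    image = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        posterior = vae.encode(image).latent_dist
        latents = posterior.sample(generator=torch.manual_seed(0))
        reconstruction = vae.decode(latents).sample  # same spatial shape as `image`
    return reconstruction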
| 78 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCamelCase ( UpperCamelCase__ ):
def __init__( self : Any , UpperCamelCase : TransformeraDModel , UpperCamelCase : AutoencoderKL , UpperCamelCase : KarrasDiffusionSchedulers , UpperCamelCase : Optional[Dict[int, str]] = None , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(transformer=__a , vae=__a , scheduler=__a )
# create an ImageNet label -> id dictionary for easier use
lowerCAmelCase__ : Dict = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
lowerCAmelCase__ : Optional[int] = int(__a )
lowerCAmelCase__ : Dict = dict(sorted(self.labels.items() ) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Union[str, List[str]] ) -> Dict:
"""simple docstring"""
if not isinstance(__a , __a ):
lowerCAmelCase__ : str = list(__a )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : float = 4.0 , UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase : int = 50 , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = len(__a )
lowerCAmelCase__ : Tuple = self.transformer.config.sample_size
lowerCAmelCase__ : List[Any] = self.transformer.config.in_channels
lowerCAmelCase__ : Any = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__a , device=self.device , dtype=self.transformer.dtype , )
lowerCAmelCase__ : int = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowerCAmelCase__ : Optional[int] = torch.tensor(__a , device=self.device ).reshape(-1 )
lowerCAmelCase__ : Tuple = torch.tensor([10_00] * batch_size , device=self.device )
lowerCAmelCase__ : List[str] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__a )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowerCAmelCase__ : Union[str, Any] = latent_model_input[: len(__a ) // 2]
lowerCAmelCase__ : Any = torch.cat([half, half] , dim=0 )
lowerCAmelCase__ : Optional[int] = self.scheduler.scale_model_input(__a , __a )
lowerCAmelCase__ : str = t
if not torch.is_tensor(__a ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowerCAmelCase__ : List[Any] = latent_model_input.device.type == """mps"""
if isinstance(__a , __a ):
lowerCAmelCase__ : Dict = torch.float32 if is_mps else torch.float64
else:
lowerCAmelCase__ : Dict = torch.int32 if is_mps else torch.int64
lowerCAmelCase__ : Optional[Any] = torch.tensor([timesteps] , dtype=__a , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowerCAmelCase__ : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase__ : Optional[Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowerCAmelCase__ : int = self.transformer(
__a , timestep=__a , class_labels=__a ).sample
# perform guidance
if guidance_scale > 1:
lowerCAmelCase__ , lowerCAmelCase__ : str = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = torch.split(__a , len(__a ) // 2 , dim=0 )
lowerCAmelCase__ : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowerCAmelCase__ : Optional[int] = torch.cat([half_eps, half_eps] , dim=0 )
lowerCAmelCase__ : Any = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = torch.split(__a , __a , dim=1 )
else:
lowerCAmelCase__ : Union[str, Any] = noise_pred
# compute previous image: x_t -> x_t-1
lowerCAmelCase__ : int = self.scheduler.step(__a , __a , __a ).prev_sample
if guidance_scale > 1:
lowerCAmelCase__ , lowerCAmelCase__ : Any = latent_model_input.chunk(2 , dim=0 )
else:
lowerCAmelCase__ : List[Any] = latent_model_input
lowerCAmelCase__ : Optional[int] = 1 / self.vae.config.scaling_factor * latents
lowerCAmelCase__ : Dict = self.vae.decode(__a ).sample
lowerCAmelCase__ : List[Any] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase__ : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase__ : Tuple = self.numpy_to_pil(__a )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__a )
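# --- illustration only: the classifier-free guidance combination used in __call__
# above, shown on toy tensors. `eps_cond` / `eps_uncond` stand for the conditional
# and unconditional noise predictions split out of the doubled batch.
def _cfg_sketch():
    eps_cond = torch.tensor([1.0, 2.0])
    eps_uncond = torch.tensor([0.5, 1.0])
    guidance_scale = 4.0
    # guidance_scale == 1 recovers eps_cond; larger values push the prediction
    # further away from the unconditional estimate.
    return eps_uncond + guidance_scale * (eps_cond - eps_uncond)  # tensor([2.5, 5.0])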
| 299 | '''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """bertabs"""
def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
super().__init__(**__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
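# --- usage sketch, assuming the class above keeps its upstream name
# `BertAbsConfig`: arguments that are not passed fall back to the defaults in
# __init__.
#
#     config = BertAbsConfig(vocab_size=30522, dec_layers=6)
#     assert config.dec_layers == 6 and config.enc_layers == 6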
| 78 | 0 |
import copy
import re
class __lowerCAmelCase :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """hp"""
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = None
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def lowerCAmelCase__ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
if len(__a ) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word ):
raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__a ) + 1 ):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_lowerCAmelCase : Union[str, Any] ):
snake_case_ = ""
while integer != 0:
snake_case_ = chr(ord("A" ) + integer % 1_0 ) + s
integer //= 1_0
return s
snake_case_ = 0
while True:
snake_case_ = word + "#" + int_to_alphabetic(__a )
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def lowerCAmelCase__ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = param_name.split("_" )
snake_case_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ["", "_"]
for separator in separators:
snake_case_ = separator.join(__a )
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def lowerCAmelCase__ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
snake_case_ = TrialShortNamer.shortname_for_key(__a , __a )
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def lowerCAmelCase__ ( cls : Any ) -> Dict:
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
snake_case_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
snake_case_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__a , __a )
snake_case_ = info
@classmethod
def lowerCAmelCase__ ( cls : int , _lowerCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO["short_param"][k]
if isinstance(__a , __a ):
snake_case_ = 1 if v else 0
snake_case_ = "" if isinstance(__a , (int, float) ) else "-"
snake_case_ = F'''{key}{sep}{v}'''
name.append(__a )
return "_".join(__a )
@classmethod
def lowerCAmelCase__ ( cls : Dict , _lowerCAmelCase : Dict ) -> Dict:
"""simple docstring"""
snake_case_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split("_" )
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split("-" )
else:
snake_case_ = re.sub("[0-9.]" , "" , __a )
snake_case_ = float(re.sub("[^0-9.]" , "" , __a ) )
snake_case_ = cls.NAMING_INFO["reverse_short_param"][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
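# --- usage sketch, assuming the classmethods above keep their upstream names
# (`shortname`, `parse_repr`; the class name TrialShortNamer is visible in the
# bodies above): values equal to a default are omitted from the name, and
# parsing restores them from DEFAULTS.
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 0.1, "batch_size": 8}
#
#     name = RunNamer.shortname({"learning_rate": 0.3, "batch_size": 8})
#     # batch_size matches its default, so the name is just "run_lr0.3"
#     assert RunNamer.parse_repr(name) == {"learning_rate": 0.3, "batch_size": 8}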
| 283 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
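# --- example mappings produced by the renaming above (pure string rewrites):
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"
#   "head.fc.bias"          -> "classifier.1.bias"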
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = get_config(snake_case_ )
# load original model from timm
UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
    do_resize=snake_case_ ,
    size={"shortest_edge": timm_transforms[0].size} ,
    resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
    do_center_crop=snake_case_ ,
    crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
    do_normalize=snake_case_ ,
    image_mean=timm_transforms[-1].mean.tolist() ,
    image_std=timm_transforms[-1].std.tolist() ,
)
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 )
UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 78 | 0 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A( UpperCamelCase__ ):
"""simple docstring"""
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(__a , '''num_heads''' ) )
class A:
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=[16, 48, 96] , SCREAMING_SNAKE_CASE__=[1, 3, 6] , SCREAMING_SNAKE_CASE__=[1, 2, 10] , SCREAMING_SNAKE_CASE__=[7, 3, 3] , SCREAMING_SNAKE_CASE__=[4, 2, 2] , SCREAMING_SNAKE_CASE__=[2, 1, 1] , SCREAMING_SNAKE_CASE__=[2, 2, 2] , SCREAMING_SNAKE_CASE__=[False, False, True] , SCREAMING_SNAKE_CASE__=[0.0, 0.0, 0.0] , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=2 , ) -> int:
"""simple docstring"""
_UpperCamelCase :str = parent
_UpperCamelCase :List[Any] = batch_size
_UpperCamelCase :Any = image_size
_UpperCamelCase :Dict = patch_sizes
_UpperCamelCase :Tuple = patch_stride
_UpperCamelCase :Dict = patch_padding
_UpperCamelCase :List[Any] = is_training
_UpperCamelCase :Optional[Any] = use_labels
_UpperCamelCase :List[str] = num_labels
_UpperCamelCase :int = num_channels
_UpperCamelCase :Any = embed_dim
_UpperCamelCase :str = num_heads
_UpperCamelCase :Any = stride_kv
_UpperCamelCase :Union[str, Any] = depth
_UpperCamelCase :Dict = cls_token
_UpperCamelCase :List[str] = attention_drop_rate
_UpperCamelCase :str = initializer_range
_UpperCamelCase :Any = layer_norm_eps
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase :Any = None
if self.use_labels:
_UpperCamelCase :Any = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase :List[str] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
_UpperCamelCase :str = CvtModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase :Dict = model(__a )
_UpperCamelCase :str = (self.image_size, self.image_size)
_UpperCamelCase , _UpperCamelCase :Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCamelCase :List[str] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCamelCase :List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
_UpperCamelCase :int = self.num_labels
_UpperCamelCase :List[Any] = CvtForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase :Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Tuple = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Any = config_and_inputs
_UpperCamelCase :Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
A = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
A = (
{"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
A = False
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Dict = CvtModelTester(self )
_UpperCamelCase :Optional[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase :str = model_class(__a )
_UpperCamelCase :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase :Tuple = [*signature.parameters.keys()]
_UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a )
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase :Optional[int] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase :Optional[int] = outputs.hidden_states
_UpperCamelCase :Dict = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCamelCase , _UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase :Dict = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase :List[Any] = True
check_hidden_states_output(__a , __a , __a )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase :Union[str, Any] = CvtModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def A_ ( ) -> Union[str, Any]:
_UpperCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Dict = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__a )
_UpperCamelCase :Dict = self.default_image_processor
_UpperCamelCase :str = prepare_img()
_UpperCamelCase :Tuple = image_processor(images=__a , return_tensors='''pt''' ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase :Dict = model(**__a )
# verify the logits
_UpperCamelCase :int = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase :Tuple = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
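# --- worked example of the patch-embedding size computation used by the model
# tester above: floor((dim + 2 * pad - kernel) / stride + 1). Stage 0 defaults
# (kernel 7, stride 4, padding 2) on the default 64x64 input give
#   floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16,
# i.e. a 16x16 stage-0 feature map.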
| 355 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_processor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
class __A ( UpperCamelCase__ ):
a__ : str = True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
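# --- the registration pattern distilled from the tests above: map a config type
# to an image-processor class so the auto-API can resolve it afterwards.
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     processor = AutoImageProcessor.from_pretrained(saved_dir)  # -> CustomImageProcessor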
| 78 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__SCREAMING_SNAKE_CASE : List[Any] =logging.getLogger()
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : Path , lowerCamelCase__ : list ):
'''simple docstring'''
A: List[Any] = """\n""".join(snake_case_ )
Path(snake_case_ ).open("""w""" ).writelines(snake_case_ )
__SCREAMING_SNAKE_CASE : Any ='patrickvonplaten/t5-tiny-random'
__SCREAMING_SNAKE_CASE : Dict ='sshleifer/bart-tiny-random'
__SCREAMING_SNAKE_CASE : Any ='sshleifer/tiny-mbart'
__SCREAMING_SNAKE_CASE : Any =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
"""simple docstring"""
def a__ ( self , A ) -> Any:
A: Dict = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
A: Tuple = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
A: Optional[Any] = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(__a , __a )
A: Dict = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
A: Optional[Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
A: Dict = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(__a , """argv""" , __a ):
run_generate()
assert Path(__a ).exists()
# os.remove(Path(output_file_name))
def a__ ( self ) -> Tuple:
self.run_eval_tester(__a )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def a__ ( self , A ) -> List[Any]:
self.run_eval_tester(__a )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def a__ ( self , A ) -> Tuple:
A: str = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
A: List[str] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
A: Dict = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
A: str = Path(self.get_auto_remove_tmp_dir() )
A: Optional[int] = str(tmp_dir / """scores.json""" )
A: Optional[Any] = str(tmp_dir / """val.target""" )
_dump_articles(__a , text["""en"""] )
_dump_articles(__a , text["""de"""] )
A: str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
A: Optional[int] = f'\n run_eval_search.py\n {model}\n {str(__a )}\n {str(__a )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(__a , """argv""" , __a ):
with CaptureStdout() as cs:
run_search()
A: Any = [""" num_beams | length_penalty""", model, """Best score args"""]
A: Union[str, Any] = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(__a )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__a ).exists()
os.remove(Path(__a ) )
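# --- the argv-patching pattern these tests rely on, shown in isolation: argparse
# inside the script entry point reads sys.argv, so replacing it drives the script
# without spawning a subprocess.
#
#     testargs = ["run_eval_search.py", model, str(src), str(out), "--task", task]
#     with patch.object(sys, "argv", testargs):
#         run_generate()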
| 135 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
def _lowercase (self : str , __a : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If the PCM bytes are already in memory, use them directly instead of re-reading the file
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase (self : Dict ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def _lowercase (self : Dict , __a : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
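# --- worked example of the int16 -> float PCM normalization used above: int16
# samples span [-32768, 32767], so dividing by 32767 maps them into ~[-1.0, 1.0].
#
#     np.array([0, 16384, 32767], dtype=np.int16).astype(np.float32) / 32767
#     # -> array([0.0, ~0.5, 1.0], dtype=float32)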
| 78 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Optional[Any]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__a )
def _UpperCAmelCase ( self ) -> List[Any]:
A = self._create_example_records()
A = Dataset.from_list(__a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__a ):
self.assertDictEqual(__a , example_records[i] )
def _UpperCAmelCase ( self ) -> str:
A = self._create_example_records()
A = Dataset.from_list(__a )
A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _UpperCAmelCase ( self ) -> Optional[int]: # checks what happens with missing columns
A = [{"""col_1""": 1}, {"""col_2""": """x"""}]
A = Dataset.from_list(__a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def _UpperCAmelCase ( self ) -> List[Any]: # checks if the type can be inferred from the second record
A = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
A = Dataset.from_list(__a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
A = Dataset.from_list([] )
self.assertEqual(len(__a ) , 0 )
self.assertListEqual(dset.column_names , [] )
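# --- behavior distilled from the tests above: Dataset.from_list takes its schema
# from the first record; later records missing a column yield None, and columns
# absent from the first record are dropped.
#
#     ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#     assert ds[0] == {"col_1": 1} and ds[1] == {"col_1": None}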
| 641 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
return image
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if "visual_encoder" in key:
UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ )
if "blocks" in key:
UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ )
if "attn" in key:
UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ )
if "norm1" in key:
UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ )
if "norm2" in key:
UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ )
if "encoder.norm" in key:
UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ )
if "encoder.pos_embed" in key:
UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ )
if "encoder.cls_token" in key:
UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ )
if "self_attn" in key:
UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ )
return key
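# --- example mapping produced by rename_key above, applying the substitutions in
# order:
#   "visual_encoder.blocks.0.attn.proj.weight"
#     -> "vision_model.encoder.layers.0.self_attn.projection.weight"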
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    # Note: the original call also passed a nonexistent `args.checkpoint_path`;
    # the function only takes the two arguments parsed above.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
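# Illustrative invocation (script name and paths are placeholders, not taken
# from the original repository layout):
#
#   python convert_blip_checkpoint.py \
#       --pytorch_dump_folder_path ./blip-base-converted \
#       --config_path ./blip_config.json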
| 78 | 0 |
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
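# Worked example (made-up benchmark data, for illustration only). An input of
#
#   {"benchmarks/read": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
#
# renders inside the <details> block as:
#
#   ### Benchmark: read
#   | metric | time |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |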
| 129 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
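# Doctest-style sketch (illustrative only):
#
# >>> betas = betas_for_alpha_bar(1000)
# >>> betas.shape
# torch.Size([1000])
# >>> betas[0].item() < betas[-1].item()  # the schedule increases toward max_beta
# True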
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
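# Minimal usage sketch (the standard diffusers scheduler loop; `unet` stands in
# for any noise-prediction model and is an assumption of this example):
#
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample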
| 78 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
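# With the lazy-module indirection above, heavy backends are only imported on
# first attribute access, e.g. (illustrative):
#
#   from transformers.models.swin import SwinConfig  # config only, no torch import
#   from transformers.models.swin import SwinModel   # triggers the torch-backed module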
| 346 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
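# Hypothetical direct usage (the tool is normally instantiated by the agents
# framework, which wires up the processor and model):
#
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   caption = captioner(Image.open("photo.jpg"))  # -> an English description string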
| 78 | 0 |
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
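# Example instantiation (default values shown explicitly for two of the fields):
#
#   config = BertAbsConfig(vocab_size=30522, dec_layers=6)
#   config.dec_hidden_size  # 768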
| 280 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
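# Doctest-style sketch: flat index 5 in a (2, 3) batch shape unravels to (1, 2):
#
# >>> _flat_idx_to_idx(5, (2, 3))
# (1, 2)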
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
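# Minimal sketch of chunking a toy layer over two batch dimensions (assumes the
# signatures defined above; names and shapes are illustrative):
#
#   def toy_layer(x=None):
#       return {"out": x * 2}
#
#   x = torch.rand(4, 8, 16)  # batch dims (4, 8), feature dim 16
#   out = chunk_layer(toy_layer, {"x": x}, chunk_size=4, no_batch_dims=2)
#   out["out"].shape  # torch.Size([4, 8, 16])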
class ChunkSizeTuner:
    def __init__(
        self,
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
| 78 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 519 | '''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
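# Usage sketch (hypothetical hyperparameter defaults):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
#   # -> 'run_lr0.0001'  (values equal to the defaults are omitted)
#   RunNamer.parse_repr("run_lr0.0001")
#   # -> {'learning_rate': 0.0001, 'batch_size': 8}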
| 78 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__SCREAMING_SNAKE_CASE : Optional[int] = 'src/transformers'
# Matches is_xxx_available()
__SCREAMING_SNAKE_CASE : Any = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__SCREAMING_SNAKE_CASE : Tuple = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__SCREAMING_SNAKE_CASE : Tuple = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__SCREAMING_SNAKE_CASE : Tuple = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__SCREAMING_SNAKE_CASE : List[str] = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__SCREAMING_SNAKE_CASE : str = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__SCREAMING_SNAKE_CASE : List[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''^\s*try:''')
# Catches a line with else:
__SCREAMING_SNAKE_CASE : str = re.compile(R'''^\s*else:''')
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def lowerCAmelCase_( ) -> Optional[int]:
_lowerCamelCase = []
for path, directories, files in os.walk(snake_case_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(snake_case_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_lowerCamelCase = str((Path(snake_case_ ) / folder).relative_to(snake_case_ ) )
_lowerCamelCase = short_path.replace(os.path.sep , '''.''' )
submodules.append(snake_case_ )
for fname in files:
if fname == "__init__.py":
continue
_lowerCamelCase = str((Path(snake_case_ ) / fname).relative_to(snake_case_ ) )
_lowerCamelCase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(snake_case_ )
return submodules
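# Illustrative sketch of the path-to-dotted-module conversion used above (the
# sample path is assumed, not taken from the repository):
import os
_example_short_path = os.path.join("models", "bert")  # "models/bert" on POSIX
_example_submodule = _example_short_path.replace(os.path.sep, ".")  # "models.bert"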
__SCREAMING_SNAKE_CASE : Tuple = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def lowerCAmelCase_( ) -> Any:
_lowerCamelCase = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(snake_case_ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_lowerCamelCase = spec.loader.load_module()
_lowerCamelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(snake_case_ ) > 0:
_lowerCamelCase = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 | '''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Tuple = ["""pixel_values"""]
def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ):
super().__init__(**__a )
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = pad_size
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
UpperCAmelCase_ = (old_height // size + 1) * size - old_height
UpperCAmelCase_ = (old_width // size + 1) * size - old_width
return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a )
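# Worked example for the pad computation above (sizes assumed): with an input
# height of 21 and pad_size=8, (21 // 8 + 1) * 8 - 21 == 3 extra rows are
# added, so the padded height becomes 24, the next multiple of 8.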
def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_pad:
UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
| 78 | 0 |
from math import pi, sqrt, tan
def __lowerCAmelCase ( A_ : float ) -> float:
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def __lowerCAmelCase ( A_ : float , A_ : float , A_ : float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __lowerCAmelCase ( A_ : float ) -> float:
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def __lowerCAmelCase ( A_ : float ) -> float:
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
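# Worked check of the cone formula above: for radius=3 and height=4 the slant
# height (height**2 + radius**2) ** 0.5 equals 5, so the surface area is
# pi * 3 * (3 + 5) == 24 * pi (about 75.398).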
def __lowerCAmelCase ( A_ : float , A_ : float , A_ : float ) -> float:
if radius_1 < 0 or radius_2 < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
__UpperCAmelCase = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def __lowerCAmelCase ( A_ : float ) -> float:
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def __lowerCAmelCase ( A_ : float , A_ : float , A_ : float ) -> float:
if side1 < 0 or side2 < 0 or side3 < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
raise ValueError("Given three sides do not form a triangle" )
__UpperCAmelCase = (side1 + side2 + side3) / 2
__UpperCAmelCase = sqrt(
semi_perimeter
* (semi_perimeter - side1)
* (semi_perimeter - side2)
* (semi_perimeter - side3) )
return area
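# Worked check of Heron's formula above: for the 3-4-5 right triangle the
# semi-perimeter is (3 + 4 + 5) / 2 == 6, so the area is
# sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) == sqrt(36) == 6.0, matching the
# base * height / 2 result for legs 3 and 4.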
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def __lowerCAmelCase ( A_ : float , A_ : float , A_ : float ) -> float:
if base1 < 0 or base2 < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (base1 + base2) * height
def __lowerCAmelCase ( A_ : float ) -> float:
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def __lowerCAmelCase ( A_ : float , A_ : float ) -> float:
if diagonal_1 < 0 or diagonal_2 < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_1 * diagonal_2
def __lowerCAmelCase ( A_ : int , A_ : float ) -> float:
if not isinstance(sides , int ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("""\nSurface Areas of various geometric shapes: \n""")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 221 | '''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
SCREAMING_SNAKE_CASE_: Dict =[
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}."
SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv."
SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}."
SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
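# Concrete pair produced by the loops above for i=0, j=0 (following the
# (stable-diffusion, HF Diffusers) ordering noted at the top of the map): the
# appended tuple is ("input_blocks.1.0.", "down_blocks.0.resnets.0."),
# since 3*0 + 0 + 1 == 1.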
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
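# Illustrative end-to-end rename performed by the function above (key assumed):
# the HF Diffusers key "down_blocks.0.resnets.0.norm1.weight" first receives
# the resnet substitution "norm1" -> "in_layers.0", then the layer-prefix map
# turns "down_blocks.0.resnets.0." into "input_blocks.1.0.", yielding the SD
# key "input_blocks.1.0.in_layers.0.weight".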
# ================#
# VAE Conversion #
# ================#
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0."
SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0."
SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}."
SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}."
SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
SCREAMING_SNAKE_CASE_: int =[
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple:
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
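# A minimal sketch of the reshape above: the SD checkpoint stores these
# attention projections as 1x1 convolutions, so a 2-D linear weight gains two
# trailing unit axes (the 512 sizes below are assumed for illustration):
_example_w = torch.zeros(512, 512)
assert _example_w.reshape(*_example_w.shape, 1, 1).shape == (512, 512, 1, 1)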
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
UpperCAmelCase_ = v
UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
SCREAMING_SNAKE_CASE_: List[Any] =[
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2}
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
UpperCAmelCase_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
UpperCAmelCase_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_ = [None, None, None]
UpperCAmelCase_ = v
continue
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
UpperCAmelCase_ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , snake_case_ )
UpperCAmelCase_ = torch.cat(snake_case_ )
return new_state_dict
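# Sketch of the q/k/v merge above (a hidden size of 1024 is assumed, as in the
# v2 OpenCLIP text encoder): three separate (1024, 1024) projection weights
# captured under one self-attention prefix are concatenated along dim 0 into a
# single (3072, 1024) in_proj weight, in the fixed order given by the
# {"q": 0, "k": 1, "v": 2} table defined above.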
def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu')
else:
SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 78 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowerCamelCase ( UpperCamelCase__ ):
_lowerCamelCase :str = """gpt_neo"""
_lowerCamelCase :Union[str, Any] = ["""past_key_values"""]
_lowerCamelCase :List[str] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : str , UpperCamelCase : List[Any]=5_02_57 , UpperCamelCase : Tuple=20_48 , UpperCamelCase : Optional[Any]=20_48 , UpperCamelCase : Dict=24 , UpperCamelCase : List[Any]=[[["global", "local"], 12]] , UpperCamelCase : List[Any]=16 , UpperCamelCase : Optional[int]=None , UpperCamelCase : Tuple=2_56 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : List[str]=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : Any=0.0 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[int]=1E-5 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : str=True , UpperCamelCase : int=5_02_56 , UpperCamelCase : int=5_02_56 , **UpperCamelCase : Dict , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : Any = num_layers
lowerCAmelCase__ : str = num_heads
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : Optional[int] = window_size
lowerCAmelCase__ : Tuple = activation_function
lowerCAmelCase__ : List[Any] = resid_dropout
lowerCAmelCase__ : Dict = embed_dropout
lowerCAmelCase__ : Any = attention_dropout
lowerCAmelCase__ : int = classifier_dropout
lowerCAmelCase__ : str = layer_norm_epsilon
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Optional[Any] = use_cache
lowerCAmelCase__ : Optional[int] = bos_token_id
lowerCAmelCase__ : str = eos_token_id
lowerCAmelCase__ : List[Any] = attention_types
lowerCAmelCase__ : Dict = self.expand_attention_types_params(__a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
@staticmethod
def _lowerCAmelCase ( UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : int = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
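# Worked example of the expansion performed above (inlined here because the
# method name in this file is a placeholder): the default value
# [[["global", "local"], 12]] repeats the two-entry pattern 12 times, giving a
# 24-entry list that matches num_layers=24.
_example_attentions = []
for _item in [[["global", "local"], 12]]:
    for _ in range(_item[1]):
        _example_attentions.extend(_item[0])
assert len(_example_attentions) == 24 and _example_attentions[:2] == ["global", "local"]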
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
import torch
lowerCAmelCase__ : Any = input.size()
lowerCAmelCase__ : Union[str, Any] = len(snake_case_ )
lowerCAmelCase__ : Tuple = shape[dimension]
lowerCAmelCase__ : List[str] = torch.arange(0 , snake_case_ , snake_case_ )
lowerCAmelCase__ : str = torch.div(sizedim - size , snake_case_ , rounding_mode="""floor""" ) + 1
lowerCAmelCase__ : int = torch.arange(snake_case_ ) + low_indices[:min_length][:, None]
lowerCAmelCase__ : List[Any] = [slice(snake_case_ )] * rank
lowerCAmelCase__ : Tuple = indices
lowerCAmelCase__ : Tuple = input[s]
lowerCAmelCase__ : str = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(snake_case_ )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
import torch
lowerCAmelCase__ : Tuple = torch.arange(1 , snake_case_ )
lowerCAmelCase__ : Union[str, Any] = torch.remainder(snake_case_ , snake_case_ )
lowerCAmelCase__ : Union[str, Any] = remainders == 0
lowerCAmelCase__ : Dict = candidates[divisor_indices]
lowerCAmelCase__ : str = torch.max(snake_case_ )
return largest_divisor, torch.div(snake_case_ , snake_case_ , rounding_mode="""floor""" )
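# Worked example for the divisor helper above (values assumed): with num=12 and
# an exclusive upper limit of 5, every candidate in [1, 2, 3, 4] divides 12, so
# the helper would return (4, 3). Inlined sketch, as the function name above is
# a placeholder:
import torch
_example_candidates = torch.arange(1, 5)
_example_divisors = _example_candidates[torch.remainder(torch.tensor(12), _example_candidates) == 0]
_example_largest = torch.max(_example_divisors)  # tensor(4)
_example_quotient = torch.div(torch.tensor(12), _example_largest, rounding_mode="floor")  # tensor(3)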
class _lowerCamelCase ( UpperCamelCase__ ):
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__a , direction="""inputs""" )
lowerCAmelCase__ : Tuple = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase__ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self._config.num_heads
def _lowerCAmelCase ( self : int , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = super(__a , self ).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : Optional[int] = seqlen + 2
lowerCAmelCase__ : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase__ : Optional[int] = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers )
]
lowerCAmelCase__ : Optional[Any] = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase__ : str = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase__ : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__a , __a , dtype=__a )] , dim=1 )
return ordered_inputs
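# Shape sketch for the dummy past_key_values built above (illustrative values,
# using the config defaults): with batch=2, num_heads=16 and hidden_size=2048,
# each layer receives a (key, value) pair of zero tensors shaped
# (2, 16, seqlen + 2, 2048 // 16) == (2, 16, seqlen + 2, 128).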
@property
def _lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return 13
| 299 | '''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase_ ( snake_case_ : ndarray ) -> float:
'''simple docstring'''
return np.dot(snake_case_ , snake_case_ )
class __A :
def __init__(self : int , *,
__a : float = np.inf , __a : str = "linear" , __a : float = 0.0 , ):
UpperCAmelCase_ = regularization
UpperCAmelCase_ = gamma
if kernel == "linear":
UpperCAmelCase_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
UpperCAmelCase_ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase_ = f"""Unknown kernel: {kernel}"""
raise ValueError(__a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.dot(__a , __a )
def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ):
return np.exp(-(self.gamma * norm_squared(vector1 - vector2 )) )
def _lowercase (self : str , __a : list[ndarray] , __a : ndarray ):
UpperCAmelCase_ = observations
UpperCAmelCase_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase_) , ) = np.shape(__a )
def to_minimize(__a : ndarray ) -> float:
UpperCAmelCase_ = 0
((UpperCAmelCase_) , ) = np.shape(__a )
for i in range(__a ):
for j in range(__a ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__a )
UpperCAmelCase_ = LinearConstraint(__a , 0 , 0 )
UpperCAmelCase_ = Bounds(0 , self.regularization )
UpperCAmelCase_ = minimize(
__a , np.ones(__a ) , bounds=__a , constraints=[ly_contraint] ).x
UpperCAmelCase_ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase_ = 0
for i in range(__a ):
for j in range(__a ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCAmelCase_ = s / n
def _lowercase (self : Optional[int] , __a : ndarray ):
UpperCAmelCase_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __a )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
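# A minimal usage sketch, assuming the de-obfuscated names of the original
# module (an SVC class exposing fit()/predict(); those names are hypothetical
# here, since the identifiers above are placeholders):
#
#     svc = SVC(kernel="linear")
#     svc.fit([np.array([1.0, 1.0]), np.array([-1.0, -1.0])], np.array([1, -1]))
#     svc.predict(np.array([2.0, 2.0]))  # expected: 1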
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __lowerCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : int , _lowerCAmelCase : Optional[NestedDataStructureLike[PathLike]] = None , _lowerCAmelCase : Optional[NamedSplit] = None , _lowerCAmelCase : Optional[Features] = None , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
snake_case_ = path_or_paths
snake_case_ = split if split or isinstance(__a , __a ) else "train"
snake_case_ = features
snake_case_ = cache_dir
snake_case_ = keep_in_memory
snake_case_ = streaming
snake_case_ = num_proc
snake_case_ = kwargs
@abstractmethod
def lowerCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
pass
class __lowerCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[Features] = None , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : Any , ) -> str:
"""simple docstring"""
snake_case_ = features
snake_case_ = cache_dir
snake_case_ = keep_in_memory
snake_case_ = streaming
snake_case_ = num_proc
snake_case_ = kwargs
@abstractmethod
def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
| 283 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """perceiver"""
def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ):
super().__init__(**__a )
UpperCAmelCase_ = num_latents
UpperCAmelCase_ = d_latents
UpperCAmelCase_ = d_model
UpperCAmelCase_ = num_blocks
UpperCAmelCase_ = num_self_attends_per_block
UpperCAmelCase_ = num_self_attention_heads
UpperCAmelCase_ = num_cross_attention_heads
UpperCAmelCase_ = qk_channels
UpperCAmelCase_ = v_channels
UpperCAmelCase_ = cross_attention_shape_for_attention
UpperCAmelCase_ = self_attention_widening_factor
UpperCAmelCase_ = cross_attention_widening_factor
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_query_residual
# masked language modeling attributes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
# image classification attributes
UpperCAmelCase_ = image_size
# flow attributes
UpperCAmelCase_ = train_size
# multimodal autoencoding attributes
UpperCAmelCase_ = num_frames
UpperCAmelCase_ = audio_samples_per_frame
UpperCAmelCase_ = samples_per_patch
UpperCAmelCase_ = output_shape
class __A ( UpperCamelCase__ ):
@property
def _lowercase (self : Dict ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def _lowercase (self : Optional[Any] ):
return 1E-4
def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase_ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("input_ids" )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase_ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 78 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase__ :int = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCamelCase__ :Any = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
UpperCamelCase__ :List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
"""simple docstring"""
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ = CHRF.BETA , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :str = len(references[0] )
if any(len(__a ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_UpperCamelCase :Optional[int] = [[refs[i] for refs in references] for i in range(__a )]
_UpperCamelCase :Union[str, Any] = CHRF(__a , __a , __a , __a , __a , __a )
_UpperCamelCase :Dict = sb_chrf.corpus_score(__a , __a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 355 | '''simple docstring'''
import requests
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> None:
'''simple docstring'''
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(snake_case_ , json={"text": message_body} , headers=snake_case_ )
if response.status_code != 2_00:
UpperCAmelCase_ = (
"Request to slack returned an error "
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 78 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple ='▁'
__SCREAMING_SNAKE_CASE : List[Any] ={
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
__SCREAMING_SNAKE_CASE : Any ={
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
__SCREAMING_SNAKE_CASE : int ={
'facebook/m2m100_418M': 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE : Union[str, Any] ={
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
A__ : List[int] = []
A__ : List[int] = []
def __init__( self , A , A , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<pad>" , A="<unk>" , A="m2m100" , A = None , A=8 , **A , ) -> Dict:
A: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
A: Any = language_codes
A: int = FAIRSEQ_LANGUAGE_CODES[language_codes]
A: Any = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
A: Optional[Any] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__a )
for lang_code in fairseq_language_code
if self.get_lang_token(__a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__a , tgt_lang=__a , bos_token=__a , eos_token=__a , sep_token=__a , unk_token=__a , pad_token=__a , language_codes=__a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__a , **__a , )
A: Optional[int] = vocab_file
A: Tuple = load_json(__a )
A: int = {v: k for k, v in self.encoder.items()}
A: List[Any] = spm_file
A: Optional[int] = load_spm(__a , self.sp_model_kwargs )
A: List[Any] = len(self.encoder )
A: Optional[Any] = {
self.get_lang_token(__a ): self.encoder_size + i for i, lang_code in enumerate(__a )
}
A: str = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__a )}
A: Tuple = {v: k for k, v in self.lang_token_to_id.items()}
A: str = src_lang if src_lang is not None else """en"""
A: Optional[Any] = tgt_lang
A: Optional[int] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
A: str = num_madeup_words
@property
def a__ ( self ) -> List[str]:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def a__ ( self ) -> List[Any]:
return self._src_lang
@src_lang.setter
def a__ ( self , A ) -> List[str]:
A: Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a__ ( self , A ) -> int:
return self.sp_model.encode(__a , out_type=__a )
def a__ ( self , A ) -> str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__a , self.encoder[self.unk_token] )
def a__ ( self , A ) -> Dict:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__a , self.unk_token )
def a__ ( self , A ) -> Optional[int]:
A: Optional[Any] = []
A: Tuple = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__a ) + token
A: Tuple = []
else:
current_sub_tokens.append(__a )
out_string += self.sp_model.decode(__a )
return out_string.strip()
def a__ ( self , A , A = None , A = False ) -> Union[str, Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
A: Tuple = [1] * len(self.prefix_tokens )
A: List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def a__ ( self , A , A = None ) -> List[Any]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__ ( self ) -> int:
A: str = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
A: Any = self.__dict__.copy()
A: Optional[Any] = None
return state
def __setstate__( self , A ) -> List[Any]:
A: Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A: Optional[int] = {}
A: Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def a__ ( self , A , A = None ) -> Tuple:
A: Tuple = Path(__a )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
A: str = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
A: Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , """wb""" ) as fi:
A: List[Any] = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def a__ ( self , A , A = "en" , A = None , A = "ro" , **A , ) -> str:
A: Any = src_lang
A: Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__a , __a , **__a )
def a__ ( self , A , A , A , **A ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A: Optional[Any] = src_lang
A: Tuple = self(__a , add_special_tokens=__a , **__a )
A: List[str] = self.get_lang_id(__a )
A: str = tgt_lang_id
return inputs
def a__ ( self ) -> List[str]:
self.set_src_lang_special_tokens(self.src_lang )
def a__ ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ ( self , A ) -> Union[str, Any]:
A: List[Any] = self.get_lang_token(__a )
A: List[Any] = self.lang_token_to_id[lang_token]
A: List[Any] = [self.cur_lang_id]
A: Optional[Any] = [self.eos_token_id]
def a__ ( self , A ) -> Any:
A: int = self.get_lang_token(__a )
A: Dict = self.lang_token_to_id[lang_token]
A: Tuple = [self.cur_lang_id]
A: int = [self.eos_token_id]
def a__ ( self , A ) -> Tuple:
return self.lang_code_to_token[lang]
def a__ ( self , A ) -> Optional[Any]:
A: Dict = self.get_lang_token(__a )
return self.lang_token_to_id[lang_token]
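# Illustrative lookup chain for the helpers above: M2M100 language tokens have
# the form "__{lang_code}__", so get_lang_token("en") returns "__en__" and its
# id comes from the lang_token_to_id table built in __init__ (exact ids depend
# on the loaded vocabulary).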
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ):
'''simple docstring'''
A: Any = sentencepiece.SentencePieceProcessor(**snake_case_ )
spm.Load(str(snake_case_ ) )
return spm
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : str ):
'''simple docstring'''
with open(snake_case_ , """r""" ) as f:
return json.load(snake_case_ )
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str ):
'''simple docstring'''
with open(snake_case_ , """w""" ) as f:
json.dump(snake_case_ , snake_case_ , indent=2 )
| 135 | '''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( UpperCamelCase__ ):
def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = 1
UpperCAmelCase_ = FrozenDict(__a )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a )
UpperCAmelCase_ = dict(scheduler.config )
UpperCAmelCase_ = True
UpperCAmelCase_ = FrozenDict(__a )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase (self : int ):
self.enable_attention_slicing(__a )
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
# Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
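# Usage sketch for this community pipeline (checkpoint names and the `init_image`
# variable are illustrative assumptions, not part of this file):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting", custom_pipeline="text_inpainting",
#       segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#       segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#   )
#   result = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]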
| 78 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
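# The loop above implements the Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n):
# `func` is a string in terms of x, so sympy's diff() produces f' symbolically and
# eval() evaluates both at the current x; Decimal keeps the iteration numerically
# stable down to the 1e-10 default precision.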
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find root of log(x) - 1 = 0 (i.e. the number e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 641 | '''simple docstring'''
def is_power_of_two(number: int) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
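# Context (Project Euler 46, Goldbach's other conjecture): find the smallest odd
# composite that cannot be written as prime + 2*k**2. The for/else in compute_nums
# appends a number only when the while loop found no such decomposition, so
# compute_nums(n) returns the first n counterexamples.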
if __name__ == "__main__":
print(F'{solution() = }')
| 129 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0
# Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
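# Worked example: for TreeNode(3, TreeNode(0), TreeNode(0)), each empty leaf returns
# excess 0, so the root must send 1 - 0 = 1 coin to each side; the move count is
# |1| + |1| = 2, which is what distribute_coins returns.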
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        '''simple docstring'''
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        return {}, {}, {}

    def preprocess(self, image):
        '''simple docstring'''
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
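# Usage sketch via the `transformers` pipeline factory (model name and URL are
# illustrative assumptions):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is the raw tensor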
| 346 | '''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file() -> str:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir: str) -> dict:
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
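# Shape of the file read above (values are illustrative): each example script writes
# {output_dir}/all_results.json, e.g. {"eval_accuracy": 0.83, "train_loss": 0.41},
# which the assertions in the tests below consume via get_results(tmp_dir).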
def is_cuda_and_apex_available() -> bool:
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 78 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'val_{metric}', mode='max', save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}', mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
'''simple docstring'''
    def on_batch_end(self, trainer, pl_module):
        """simple docstring"""
        lrs = {f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
"""simple docstring"""
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg)
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(__a )
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """simple docstring"""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        """simple docstring"""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 280 | '''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __a )
else:
forceWrite(self.choices[index] , __a )
def _lowercase (self : Any , __a : int ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
    def move_up(self):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
    def move_down(self):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
    def select(self):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
    def interrupt(self):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
else:
return
else:
return
    def run(self, default_choice: int = 0):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
                    choice = int(builtins.input())
except ValueError:
                    choice = default_choice
else:
                choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(__a , "\n" )
return choice
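# Usage sketch (interactive; requires a real terminal, choices are illustrative):
#   index = BulletMenu("Pick a compute backend:", ["cpu", "cuda", "mps"]).run(default_choice=0)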
| 78 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""])
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""])
            changes_to_apply = False
            if state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""", trim_offsets) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("""type"""))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs.""")
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs.""")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
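# Usage sketch (checkpoint name taken from the pretrained map above; the input
# string is illustrative):
#   tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   enc = tok("Summarize: MVP is a multi-task pre-trained model.", return_tensors="pt")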
| 519 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
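# Note on the pattern above: `_LazyModule` replaces this module in `sys.modules`
# so the heavy torch/flax submodules are only imported on first attribute access;
# under TYPE_CHECKING the real imports run instead, keeping static analyzers happy.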
| 78 | 0 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file: str) -> str:
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F"""{test_file} instead.""")
    test_fn = components[-1]
    if not test_fn.endswith('''py'''):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith('''test_modeling_'''):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""")
    components = components[:-1] + [test_fn.replace('''.py''', '''''')]
    test_module_path = '''.'''.join(components)
    return test_module_path
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('''ModelTester'''):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, '''all_model_classes''', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, '''setUp'''):
        test.setUp()
    model_tester = None
    if hasattr(test, '''model_tester'''):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
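# Usage sketch (the test file path is illustrative):
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_tester_mapping(test_file)))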
| 661 | '''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args) -> None:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args) -> None:
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args) -> None:
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
UpperCAmelCase_ = pickle.load(snake_case_ )
UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase_ = 0.0 # do not predict special tokens
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
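# Example invocation (paths are illustrative; see the argparse flags defined in main()):
#   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_distillation --data_file data/binarized_text.pickle \
#     --token_counts data/token_counts.pickle --force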
| 78 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.sp_model)

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        '''simple docstring'''
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self: Tuple , __lowerCAmelCase: str , __lowerCAmelCase: Optional[str] = None ) -> Dict:
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error("Vocabulary path ({}) should be a directory".format(__a ) )
return
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
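# A minimal usage sketch of the tokenizer above, assuming a local SentencePiece
# model file exists at the (hypothetical) path "sentencepiece.model":
#
#   tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#   ids = tokenizer.build_inputs_with_special_tokens([10, 11, 12])
#   # -> [cls_token_id, 10, 11, 12, sep_token_id]
#   mask = tokenizer.get_special_tokens_mask([10, 11, 12])
#   # -> [1, 0, 0, 0, 1]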
| 221 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # For the backwards pass, for simplicity, we won't calculate the loss
        # and instead backprop on the mean difference to random labels.
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameter gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01,
                    1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
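# A minimal offline sketch of the encode/decode round trip these tests exercise,
# using a randomly initialized AutoencoderKL with the same dummy config as the
# fast tests above (assumption: only shapes are checked, no pretrained weights):
#
#   vae = AutoencoderKL(
#       block_out_channels=[32, 64], in_channels=3, out_channels=3,
#       down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#       up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
#   )
#   image = torch.randn(1, 3, 32, 32)
#   with torch.no_grad():
#       latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
#       reconstruction = vae.decode(latents).sample
#   # two down blocks -> one downsample -> latents come out as (1, 4, 16, 16)
#   assert reconstruction.shape == image.shape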
| 78 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 299 | '''simple docstring'''
import logging

from transformers import PretrainedConfig

logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
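# A short usage sketch of the config above (values are the defaults from the
# constructor, shown only for illustration):
#
#   config = BertAbsConfig(enc_layers=6, dec_layers=6)
#   assert config.model_type == "bertabs"
#   assert config.enc_hidden_size == 512 and config.dec_hidden_size == 768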
| 78 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
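# Why metadata defaults to None above rather than {}: Python evaluates default
# argument values once, so a mutable default would be shared by every call.
# A minimal illustration of the pitfall (hypothetical function, not part of this module):
#
#   def buggy(metadata={}):
#       metadata["hit"] = metadata.get("hit", 0) + 1
#       return metadata
#
#   buggy()  # {'hit': 1}
#   buggy()  # {'hit': 2} -- the same dict object leaks across calls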
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
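# A minimal usage sketch of the parser above (the dataclass is illustrative):
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = HfArg(default=5e-5, help="Peak learning rate.")
#       do_train: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(
#       args=["--learning_rate", "3e-5", "--do_train"]
#   )
#   assert training_args.learning_rate == 3e-5 and training_args.do_train is True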
| 283 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model and rename keys to the HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor that mirrors the timm preprocessing pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
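# Hypothetical command-line invocation of this conversion script (the script
# filename and output directory are illustrative, not taken from the source):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub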
| 78 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 355 | '''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def _lowercase (self : List[str] ):
UpperCAmelCase_ = 0
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def _lowercase (self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
UpperCAmelCase_ = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Tuple ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" )
def _lowercase (self : Optional[int] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowercase (self : Optional[int] ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json"
UpperCAmelCase_ = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : Optional[int] ):
        class NewImageProcessor(CLIPImageProcessor ):
            is_local = True
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(not hasattr(image_processor , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 78 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    '''simple docstring'''
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
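# Usage sketch for electric_power above: pass exactly one zero and the function
# solves P = V * I for the missing quantity.
print(electric_power(voltage=0, current=2, power=5))      # result(name='voltage', value=2.5)
print(electric_power(voltage=2.2, current=2.2, power=0))  # result(name='power', value=4.84)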
| 135 | '''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
    def encode_example(self , value : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
        if isinstance(value , str ):
return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self , value : dict , token_per_repo_id : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
        path, file = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::" )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , "rb" , use_auth_token=use_auth_token ) as f:
                array, sampling_rate = sf.read(f )
        else:
            array, sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage(self , storage : Union[pa.StringArray, pa.StructArray] ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage(self , storage : pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
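# A short usage sketch for the Audio feature above, assuming the public
# datasets API; "my_file.wav" is a placeholder for a real audio file on disk.
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["my_file.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # decoding yields {"path", "array" (float32), "sampling_rate"}
print(sample["sampling_rate"], sample["array"].shape)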
| 78 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class HeunDiscreteScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self , a__ = 1000 , a__ = 0.0_00_85 , a__ = 0.0_12 , a__ = "linear" , a__ = None , a__ = "epsilon" , a__ = False , a__ = False , a__ = 1.0 , a__ = "linspace" , a__ = 0 , ) -> Optional[int]:
        if trained_betas is not None:
            A = torch.tensor(__a , dtype=torch.float32 )
        elif beta_schedule == "linear":
            A = torch.linspace(__a , __a , __a , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            A = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.float32 ) ** 2
            )
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(__a , alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(__a , alpha_transform_type="""exp""" )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
A = use_karras_sigmas
def _UpperCAmelCase ( self , a__ , a__=None ) -> List[str]:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(__a ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _UpperCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _UpperCAmelCase ( self , a__ , a__ , ) -> List[str]:
A = self.index_for_timestep(__a )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _UpperCAmelCase ( self , a__ , a__ = None , a__ = None , ) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(__a )
A = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
        A = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
A = torch.from_numpy(__a ).to(device=__a )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(__a )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("""mps""" ):
# mps does not support float64
            A = timesteps.to(__a , dtype=torch.float32 )
else:
A = timesteps.to(device=__a )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(__a )
def _UpperCAmelCase ( self , a__ , a__ ) -> List[str]:
# get log sigma
A = np.log(__a )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(__a , 0 , 1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _UpperCAmelCase ( self , a__ , a__ ) -> List[Any]:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 , 1 , __a )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return self.dt is None
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ = True , ) -> List[Any]:
A = self.index_for_timestep(__a )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def _UpperCAmelCase ( self , a__ , a__ , a__ , ) -> Optional[int]:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
            A = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            A = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(__a , __a ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
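# A quick sanity-check sketch for the betas_for_alpha_bar helper defined at the
# top of this file: the default cosine transform should yield one beta per
# diffusion timestep, each capped at max_beta.
betas = betas_for_alpha_bar(10)
assert betas.shape == (10,)
assert float(betas.max()) <= 0.999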
| 641 | '''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size , device ):
    '''simple docstring'''
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key ):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*" , "vision_model.encoder" , key )
    if "blocks" in key:
        key = re.sub(r"blocks" , "layers" , key )
    if "attn" in key:
        key = re.sub(r"attn" , "self_attn" , key )
    if "norm1" in key:
        key = re.sub(r"norm1" , "layer_norm1" , key )
    if "norm2" in key:
        key = re.sub(r"norm2" , "layer_norm2" , key )
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm" , "post_layernorm" , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj" , "embeddings.patch_embedding" , key )
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed" , "embeddings.position_embedding" , key )
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token" , "embeddings.class_embedding" , key )
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj" , "self_attn.projection" , key )
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path , config_path=None ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
hf_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = 3_84
UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
UpperCAmelCase_ = hf_model.generate(snake_case_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ )
hf_vqa_model.load_state_dict(snake_case_ )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(snake_case_ )
UpperCAmelCase_ = rename_key(snake_case_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case_ )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
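# A tiny illustration of the rename_key mapping above on two representative
# (purely illustrative) BLIP state-dict keys.
print(rename_key("visual_encoder.blocks.0.attn.qkv.weight"))
# -> vision_model.encoder.layers.0.self_attn.qkv.weight
print(rename_key("visual_encoder.patch_embed.proj.weight"))
# -> vision_model.embeddings.patch_embedding.weight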
| 78 | 0 |
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """simple docstring"""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
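# Usage sketch: merging [2, 3, 4] optimally first combines 2 + 3 (cost 5),
# then 5 + 4 (cost 9), for a total cost of 14.
print(optimal_merge_pattern([2, 3, 4]))  # 14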
| 129 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class HeunDiscreteScheduler(SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
        if trained_betas is not None:
            UpperCAmelCase_ = torch.tensor(__a , dtype=torch.float32 )
        elif beta_schedule == "linear":
            UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            UpperCAmelCase_ = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.float32 ) ** 2
            )
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
        UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
            UpperCAmelCase_ = timesteps.to(__a , dtype=torch.float32 )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
            UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
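# A minimal denoising-loop sketch for the scheduler above, written against the
# upstream diffusers HeunDiscreteScheduler API (most method names in this copy
# were lost to obfuscation); noise_pred is a zero placeholder standing in for
# a real UNet call.
import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample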
| 78 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
        return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self ):
        '''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        '''simple docstring'''
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
    def test_config(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase ):
@slow
    def test_autoregressive_prediction(self ):
'''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1 , 1 , config.state_dim).to(device=torch_device , dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32).reshape(1 , 1 , 1)
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32)
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32)
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long).reshape(1 , 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device)] , dim=1)
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device)] , dim=1)
            attention_mask = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim).to(device=torch_device , dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long) * (step + 1)] , dim=1)
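# A compact single-pass inference sketch mirroring the integration test above;
# all tensors are random stand-ins and the config dimensions are arbitrary.
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=10, act_dim=3)
model = DecisionTransformerModel(config).eval()

out = model(
    states=torch.randn(1, 1, config.state_dim),
    actions=torch.zeros(1, 1, config.act_dim),
    rewards=torch.zeros(1, 1),
    returns_to_go=torch.tensor([[[10.0]]]),
    timesteps=torch.zeros(1, 1, dtype=torch.long),
    attention_mask=torch.ones(1, 1, dtype=torch.long),
)
print(out.action_preds.shape)  # torch.Size([1, 1, 3])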
| 346 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool ):
    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVision2Seq
    inputs = ["""image"""]
    outputs = ["""text"""]
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode(self , image : "Image" ):
        return self.pre_processor(images=image , return_tensors="pt" )
    def forward(self , inputs ):
        return self.model.generate(**inputs )
    def decode(self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
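# A usage sketch for the captioning tool above, assuming the transformers tools
# runtime; "photo.jpg" is a placeholder path for a local image.
from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))
print(caption)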
| 78 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """simple docstring"""
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            'Request to slack returned an error '
            F'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 280 | '''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
    '''simple docstring'''
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError("Not supported" )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx : int , dims : Tuple[int, ...] ) -> Tuple[int, ...]:
    '''simple docstring'''
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
@torch.jit.ignore
def _get_minimal_slice_set(start : Sequence[int] , end : Sequence[int] , dims : Sequence[int] , start_edges : Optional[Sequence[bool]] = None , end_edges : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
    '''simple docstring'''
    def reduce_edge_list(l : List[bool] ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    slices = []
    path_list = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path = tuple(path_list )
    divergence_idx = len(path_list )
    # start == end, and we're done
    if divergence_idx == len(start ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t : torch.Tensor , flat_start : int , flat_end : int , no_batch_dims : int ) -> torch.Tensor:
    '''simple docstring'''
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer(layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , ) -> Any:
    '''simple docstring'''
    if not (len(inputs ) > 0):
        raise ValueError("Must provide at least one input" )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(num_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(d1 : dict , d2 : dict ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for x1, x2 in zip(out , output_chunk ):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
return out
class __A :
    def __init__(self , max_chunk_size : int = 512 , ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size(self , fn : Callable , args : tuple , min_chunk_size : int ):
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size : int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self , ac1 : Iterable , ac2 : Iterable ):
        consistent = True
        for a1, a2 in zip(ac1 , ac2 ):
            assert type(a1 ) == type(a2 )
            if isinstance(a1 , (list, tuple) ):
                consistent &= self._compare_arg_caches(a1 , a2 )
            elif isinstance(a1 , dict ):
                a1_items = [v for _, v in sorted(a1.items() , key=lambda x : x[0] )]
                a2_items = [v for _, v in sorted(a2.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(a1_items , a2_items )
            else:
                consistent &= a1 == a2
        return consistent
def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase_ = False
if not consistent:
UpperCAmelCase_ = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
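# Usage sketch (function and values hypothetical): the tuner probes power-of-two
# chunk sizes with a binary search, treating a RuntimeError (e.g. CUDA OOM) as
# "not viable", and caches the winner until the argument shapes change:
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   best = tuner.tune_chunk_size(my_chunked_fn, args=(x,), min_chunk_size=16)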
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
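# Worked example: abbr("daBcd", "ABC") is True (capitalize "a" and "c", delete the
# two "d"s), while abbr("dBcd", "ABC") is False (no way to produce the leading "A").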
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter so a collision cannot loop forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"""{key}{sep}{v}"""
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
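# Illustrative round trip (the subclass below is hypothetical):
#
#   class MyNamer(TrialShortNamer):
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   MyNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  # -> "hp_bs64"
#   MyNamer.parse_repr("hp_bs64")  # -> {"batch_size": 64.0, "learning_rate": 0.001}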
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether"""
                    " to overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""")

        # SAVE PARAMS #
        logger.info(f"""Param: {args}""")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"""Loading student config from {args.student_config}""")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"""cuda:{args.local_rank}""")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"""cuda:{args.local_rank}""")
    logger.info(f"""Teacher loaded from {args.teacher_name}.""")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
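# Example invocation (paths hypothetical; see the argument definitions above):
#
#   python train.py --force --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0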
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right edges so height and width become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
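# Minimal usage sketch (the input image is hypothetical): Swin2SR expects
# window-aligned inputs, so H and W are padded up to multiples of `pad_size`:
#
#   processor = Swin2SRImageProcessor(pad_size=8)
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"][0].shape  # (C, H', W') with H' and W' multiples of 8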
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
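# Usage sketch (the checkpoint name is one example of a compatible model):
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]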
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which they have been defined.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
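# Note: the HF VAE stores its attention projections as nn.Linear weights of shape
# (out, in), while original SD checkpoints store them as 1x1 Conv2d weights of
# shape (out, in, 1, 1); hence the reshape_weight_for_sd helper above.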
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single character: "q", "k" or "v"
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
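# Example invocation (script and file names hypothetical):
#
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half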
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
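# With this lazy-module pattern, importing the package is cheap: heavy submodules
# (e.g. modeling_llama and its torch dependency) are only imported the first time
# one of their attributes, such as LlamaForCausalLM, is actually accessed.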
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of `vector`."""
    return np.dot(vector, vector)
class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
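# Toy usage sketch (data hypothetical): two classes separable on the first axis.
#
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear", regularization=10)
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))  # expected: 1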
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
A = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Dict = MPNetModelTester(self )
_UpperCamelCase :List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__a )
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__a )
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__a )
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__a )
def _UpperCamelCase( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__a )
@require_torch
class A( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :List[Any] = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
_UpperCamelCase :Any = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCamelCase :Any = model(__a )[0]
_UpperCamelCase :Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __a )
_UpperCamelCase :Optional[int] = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
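# A typical invocation for just this file (illustrative; the path assumes the
# usual transformers repository layout and is not part of the original snippet):
#   python -m pytest tests/models/mpnet/test_modeling_mpnet.py -v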
| 355 | """Send a message to a Slack channel through an incoming webhook."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
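    # Equivalent shell invocation against the same webhook (illustrative sketch,
    # not part of the original snippet):
    #   curl -X POST -H 'Content-Type: application/json' \
    #        --data '{"text": "<YOUR MESSAGE BODY>"}' '<SLACK CHANNEL URL>'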
| 78 | 0 |
"""
Project Euler Problem 3: https://projecteuler.net/problem=3

The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` using trial division.

    >>> solution(13195)
    29
    >>> solution(10)
    5
    >>> solution(17)
    17
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
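    # Trace of the trial division for solution(13195), step by step:
    #   13195 / 5 = 2639, 2639 / 7 = 377, 377 / 13 = 29, 29 / 29 = 1
    # The last divisor that divided evenly, 29, is the largest prime factor.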
| 135 | """Text-guided inpainting: CLIPSeg predicts a mask from a text prompt and
Stable Diffusion inpaints the masked region."""
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# The class name is reconstructed; the obfuscated source only preserved the base
# class and the component layout.
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting the slice size to None re-enables full attention computation
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Predict a segmentation mask for `text` on the input image with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
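# Minimal usage sketch (illustrative only; the Hub checkpoint names and the
# `custom_pipeline` loading path are assumptions, not part of the original snippet):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#       segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#   )
#   result = pipe(prompt="a yellow sofa", image=init_image, text="the old couch").images[0]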
| 78 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
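# Effect of the lazy module (illustrative note, not part of the original file):
# `import transformers.models.gpt_bigcode` stays cheap, and the torch-backed
# symbols listed in `_import_structure` are only materialised on first attribute
# access, e.g. when `GPTBigCodeModel` is actually looked up.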
| 641 | """Check whether a number is a power of two, using the ``n & (n - 1)`` bit trick."""


def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` is a power of two (the bit trick also accepts 0).

    >>> is_power_of_two(0)
    True
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(16)
    True
    >>> is_power_of_two(18)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
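# Why the trick works (illustrative note, not part of the original file): a power
# of two has exactly one set bit, and subtracting 1 clears that bit while setting
# every lower bit, so the AND of the two is zero:
#   16 = 0b10000, 15 = 0b01111 -> 16 & 15 == 0        (power of two)
#   18 = 0b10010, 17 = 0b10001 -> 18 & 17 == 0b10000  (not a power of two)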
| 78 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
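# Note (illustrative, not part of the original file): when the backend check above
# fails, the dummy objects are importable but raise a descriptive error as soon as
# they are instantiated, so the missing-dependency failure happens at use time
# rather than at import time.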
| 129 | """Distribute coins in a binary tree so every node ends with exactly one coin,
minimising the number of single-edge moves (LeetCode 979)."""
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to leave one coin on every node.

    >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    >>> distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0)))
    3
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
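    # A small worked example (illustrative, not in the original file): for the tree
    #
    #       0
    #      / \
    #     3   0
    #
    # the left leaf keeps one coin and sends two up (2 moves), then the root
    # forwards one coin to the right leaf (1 move), for 3 moves in total.
    example_root = TreeNode(0, TreeNode(3), TreeNode(0))
    print(distribute_coins(example_root))  # prints 2 + 1 = 3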
| 78 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: int = "huggingface/label-files"
__lowerCAmelCase: Optional[int] = "imagenet-1k-id2label.json"
__lowerCAmelCase: Dict = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase: Any = {int(snake_case_ ): v for k, v in idalabel.items()}
__lowerCAmelCase: Optional[int] = {v: k for k, v in idalabel.items()}
__lowerCAmelCase: Any = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
__lowerCAmelCase: str = BitConfig(
conv_layer=snake_case_ , num_labels=1_0_0_0 , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
if "stem.conv" in name:
__lowerCAmelCase: Union[str, Any] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__lowerCAmelCase: Any = name.replace("blocks" , "layers" )
if "head.fc" in name:
__lowerCAmelCase: List[str] = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
__lowerCAmelCase: str = "bit." + name
if "bit" not in name and "classifier" not in name:
__lowerCAmelCase: Dict = "bit.encoder." + name
return name
def a__ ( ) -> Dict:
__lowerCAmelCase: List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase: Optional[int] = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = get_config(snake_case_ )
# load original model from timm
__lowerCAmelCase: Optional[int] = create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model
__lowerCAmelCase: Optional[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
__lowerCAmelCase: Optional[Any] = state_dict.pop(snake_case_ )
__lowerCAmelCase: List[Any] = val.squeeze() if "head" in key else val
# load HuggingFace model
__lowerCAmelCase: str = BitForImageClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# create image processor
__lowerCAmelCase: List[str] = create_transform(**resolve_data_config({} , model=snake_case_ ) )
__lowerCAmelCase: Dict = transform.transforms
__lowerCAmelCase: Tuple = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
__lowerCAmelCase: Any = BitImageProcessor(
do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__lowerCAmelCase: Dict = prepare_img()
__lowerCAmelCase: str = transform(snake_case_ ).unsqueeze(0 )
__lowerCAmelCase: Any = processor(snake_case_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
__lowerCAmelCase: Optional[Any] = model(snake_case_ )
__lowerCAmelCase: str = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
__lowerCAmelCase: Tuple = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(F"Pushing model {model_name} and processor to the hub" )
model.push_to_hub(F"ybelkada/{model_name}" )
processor.push_to_hub(F"ybelkada/{model_name}" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
__A = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
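# Example invocation (illustrative; the script name assumes the usual transformers
# layout and is not confirmed by this snippet):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-base --push_to_hub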
| 346 | """Integration tests for the PyTorch `*_no_trainer.py` example scripts, launched through accelerate."""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config

from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


# Class and test method names are reconstructed from the usual transformers
# example-test layout; the obfuscated source only preserved bodies and values.
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
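# These tests shell out through `accelerate launch` (see cls._launch_args above).
# An illustrative direct invocation with the slow tests enabled (the env var and
# path follow transformers conventions and are assumptions here):
#   RUN_SLOW=1 python -m pytest examples/pytorch/test_accelerate_examples.py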
| 78 | 0 |