"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def a__ ( lowerCAmelCase ) -> str:
random.seed(lowerCAmelCase )
np.random.seed(lowerCAmelCase )
torch.manual_seed(lowerCAmelCase )
torch.cuda.manual_seed_all(lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase = 0.9_999 , _lowerCamelCase = 0.0 , _lowerCamelCase = 0 , _lowerCamelCase = False , _lowerCamelCase = 1.0 , _lowerCamelCase = 2 / 3 , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
if isinstance(_lowerCamelCase , torch.nn.Module ):
UpperCAmelCase__ : Any = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase , )
UpperCAmelCase__ : str = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
UpperCAmelCase__ : List[str] = True
if kwargs.get("""max_value""" , _lowerCamelCase ) is not None:
UpperCAmelCase__ : Dict = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase )
UpperCAmelCase__ : Dict = kwargs["""max_value"""]
if kwargs.get("""min_value""" , _lowerCamelCase ) is not None:
UpperCAmelCase__ : List[Any] = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase )
UpperCAmelCase__ : Dict = kwargs["""min_value"""]
UpperCAmelCase__ : Tuple = list(_lowerCamelCase )
UpperCAmelCase__ : int = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , _lowerCamelCase ) is not None:
UpperCAmelCase__ : Any = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase )
self.to(device=kwargs["""device"""] )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : int = decay
UpperCAmelCase__ : List[str] = min_decay
UpperCAmelCase__ : str = update_after_step
UpperCAmelCase__ : int = use_ema_warmup
UpperCAmelCase__ : List[Any] = inv_gamma
UpperCAmelCase__ : Tuple = power
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Union[str, Any] = None # set in `step()`
UpperCAmelCase__ : Tuple = model_cls
UpperCAmelCase__ : List[str] = model_config
@classmethod
def _a (cls , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : int = model_cls.load_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = model_cls.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Tuple = cls(model.parameters() , model_cls=_lowerCamelCase , model_config=model.config )
ema_model.load_state_dict(_lowerCamelCase )
return ema_model
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
UpperCAmelCase__ : Optional[Any] = self.model_cls.from_config(self.model_config )
UpperCAmelCase__ : List[str] = self.state_dict()
state_dict.pop("""shadow_params""" , _lowerCamelCase )
model.register_to_config(**_lowerCamelCase )
self.copy_to(model.parameters() )
model.save_pretrained(_lowerCamelCase )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCAmelCase__ : Union[str, Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCAmelCase__ : Union[str, Any] = (1 + step) / (10 + step)
UpperCAmelCase__ : Optional[int] = min(_lowerCamelCase , self.decay )
# make sure decay is not smaller than min_decay
UpperCAmelCase__ : Any = max(_lowerCamelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if isinstance(_lowerCamelCase , torch.nn.Module ):
UpperCAmelCase__ : int = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase , )
UpperCAmelCase__ : Any = parameters.parameters()
UpperCAmelCase__ : List[str] = list(_lowerCamelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCAmelCase__ : Optional[int] = self.get_decay(self.optimization_step )
UpperCAmelCase__ : Optional[Any] = decay
UpperCAmelCase__ : int = 1 - decay
UpperCAmelCase__ : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _lowerCamelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
UpperCAmelCase__ : int = deepspeed.zero.GatheredParameters(_lowerCamelCase , modifier_rank=_lowerCamelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_lowerCamelCase )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = list(_lowerCamelCase )
for s_param, param in zip(self.shadow_params , _lowerCamelCase ):
param.data.copy_(s_param.to(param.device ).data )
def _a (self , _lowerCamelCase=None , _lowerCamelCase=None ):
"""simple docstring"""
UpperCAmelCase__ : Any = [
p.to(device=_lowerCamelCase , dtype=_lowerCamelCase ) if p.is_floating_point() else p.to(device=_lowerCamelCase )
for p in self.shadow_params
]
def _a (self ):
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = [param.detach().cpu().clone() for param in parameters]
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , _lowerCamelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCAmelCase__ : Any = None
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = copy.deepcopy(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
UpperCAmelCase__ : Optional[int] = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , _lowerCamelCase ):
raise ValueError("""Invalid min_decay""" )
UpperCAmelCase__ : List[Any] = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , _lowerCamelCase ):
raise ValueError("""Invalid optimization_step""" )
UpperCAmelCase__ : int = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , _lowerCamelCase ):
raise ValueError("""Invalid update_after_step""" )
UpperCAmelCase__ : Tuple = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _lowerCamelCase ):
raise ValueError("""Invalid use_ema_warmup""" )
UpperCAmelCase__ : Optional[Any] = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
UpperCAmelCase__ : Any = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
UpperCAmelCase__ : Union[str, Any] = state_dict.get("""shadow_params""" , _lowerCamelCase )
if shadow_params is not None:
UpperCAmelCase__ : str = shadow_params
if not isinstance(self.shadow_params , _lowerCamelCase ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(_lowerCamelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
# Load configuration defined in the metadata file
with open(lowerCAmelCase ) as metadata_file:
UpperCAmelCase__ : Optional[int] = json.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = LukeConfig(use_entity_aware_attention=lowerCAmelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase , map_location="""cpu""" )
# Load the entity vocab file
UpperCAmelCase__ : List[str] = load_entity_vocab(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCAmelCase__ : Any = AddedToken("""<ent>""" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
UpperCAmelCase__ : Tuple = AddedToken("""<ent2>""" , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : int = LukeTokenizer.from_pretrained(lowerCAmelCase )
# Initialize the embeddings of the special tokens
UpperCAmelCase__ : Any = state_dict["""embeddings.word_embeddings.weight"""]
UpperCAmelCase__ : Any = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
UpperCAmelCase__ : List[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
UpperCAmelCase__ : List[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCAmelCase__ : Any = F"""encoder.layer.{layer_index}.attention.self."""
UpperCAmelCase__ : List[Any] = state_dict[prefix + matrix_name]
UpperCAmelCase__ : Dict = state_dict[prefix + matrix_name]
UpperCAmelCase__ : Optional[int] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCAmelCase__ : List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCAmelCase__ : int = entity_emb[entity_vocab["""[MASK]"""]]
UpperCAmelCase__ : Tuple = LukeModel(config=lowerCAmelCase ).eval()
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
if not (len(lowerCAmelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {', '.join(lowerCAmelCase )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
F""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
UpperCAmelCase__ : Optional[Any] = LukeTokenizer.from_pretrained(lowerCAmelCase , task="""entity_classification""" )
UpperCAmelCase__ : Dict = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
UpperCAmelCase__ : List[Any] = (39, 42)
UpperCAmelCase__ : str = tokenizer(lowerCAmelCase , entity_spans=[span] , add_prefix_space=lowerCAmelCase , return_tensors="""pt""" )
UpperCAmelCase__ : List[str] = model(**lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 42, 10_24) )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
UpperCAmelCase__ : Optional[int] = torch.Size((1, 42, 7_68) )
UpperCAmelCase__ : Optional[Any] = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
UpperCAmelCase__ : Union[str, Any] = torch.Size((1, 1, 10_24) )
UpperCAmelCase__ : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
UpperCAmelCase__ : Tuple = torch.Size((1, 1, 7_68) )
UpperCAmelCase__ : List[str] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCAmelCase ) )
model.save_pretrained(lowerCAmelCase )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
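
# Note (added for clarity): load_entity_vocab expects one tab-separated record
# per line and keys the vocabulary by the first field, mapping it to the line
# index. An illustrative entity_vocab.tsv (the second column is a count that
# the loader discards):
#
#     [PAD]\t0
#     [UNK]\t0
#     [MASK]\t0
#     Ana Ivanovic\t142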
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_A = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
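
# Illustrative invocation (added; assumes this script is saved as
# convert_luke_checkpoint.py and that the paths are placeholders):
#
#     python convert_luke_checkpoint.py \
#         --checkpoint_path pytorch_model.bin \
#         --metadata_path metadata.json \
#         --entity_vocab_path entity_vocab.tsv \
#         --pytorch_dump_folder_path ./luke-base \
#         --model_size base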
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into a unit lower triangular matrix and an
    upper triangular matrix (Doolittle LU decomposition, no pivoting)."""
    # Table that contains our data; it has to be a square array, so check first.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
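
# Worked example (added for illustration): factor a 3x3 matrix and check that
# the product of the factors reproduces it.
#
#     matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#     lower, upper = lower_upper_decomposition(matrix)
#     assert np.allclose(lower @ upper, matrix)  # lower is unit lower-triangular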
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n below
    3,317,044,064,679,887,385,961,981. Above that bound the test is only
    probabilistic and must be enabled with allow_probable=True."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite
    and a prime in each range."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
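
# Quick illustrative checks (added; not part of the original test suite):
#
#     assert miller_rabin(97)        # prime
#     assert not miller_rabin(91)    # 91 = 7 * 13
#
# The witness loop above relies on writing n - 1 = d * 2**s with d odd,
# e.g. for n = 97: 96 = 3 * 2**5, so d = 3 and s = 5.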
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
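
# Hedged usage sketch (added; not part of the original tests): in practice the
# criteria exercised above plug into `generate` via its `stopping_criteria`
# argument, e.g.
#
#     criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
#     output_ids = model.generate(input_ids, stopping_criteria=criteria)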
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
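
# Brief usage sketch (added for illustration): like any PretrainedConfig
# subclass, the defaults above can be overridden field by field.
#
#     config = EfficientFormerConfig(image_size=384, num_meta3d_blocks=2)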
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General feature extraction class for sequence (e.g. speech) inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
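
# Hedged usage sketch (added; not part of the original module): `pad` is
# designed to double as a DataLoader collate_fn. With a concrete subclass such
# as a Wav2Vec2-style feature extractor (illustrative), ragged inputs are
# padded to the longest item:
#
#     features = [{"input_values": np.zeros(800)}, {"input_values": np.zeros(1600)}]
#     batch = extractor.pad(features, padding=True, return_tensors="np")
#     # batch["input_values"].shape == (2, 1600), padded with `padding_value`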
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase_ ( ):
lowercase_ = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
lowercase_ = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ).convert("""RGB""" )
return image
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = dct.pop(UpperCAmelCase__ )
lowercase_ = val
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowercase_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowercase_ = torch.cat((q_bias, torch.zeros_like(UpperCAmelCase__ , requires_grad=UpperCAmelCase__ ), v_bias) )
lowercase_ = qkv_bias
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = 3_6_4 if """coco""" in model_name else 2_2_4
lowercase_ = InstructBlipVisionConfig(image_size=UpperCAmelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase_ = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase_ = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase_ = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase_ = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase_ = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
lowercase_ = InstructBlipConfig(vision_config=UpperCAmelCase__ , text_config=UpperCAmelCase__ , qformer_config=UpperCAmelCase__ )
return config, image_size
@torch.no_grad()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=False ):
lowercase_ = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
lowercase_ = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase_ = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
lowercase_ , lowercase_ = get_blipa_config(UpperCAmelCase__ )
lowercase_ = InstructBlipForConditionalGeneration(UpperCAmelCase__ ).eval()
lowercase_ = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowercase_ , lowercase_ = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowercase_ = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowercase_ = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowercase_ , lowercase_ , lowercase_ = load_model_and_preprocess(
name=UpperCAmelCase__ , model_type=UpperCAmelCase__ , is_eval=UpperCAmelCase__ , device=UpperCAmelCase__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowercase_ = original_model.state_dict()
lowercase_ = create_rename_keys(UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase_ = state_dict.pop(UpperCAmelCase__ )
if key.startswith("""Qformer.bert""" ):
lowercase_ = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowercase_ = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
lowercase_ = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
lowercase_ = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
lowercase_ = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
lowercase_ = key.replace("""t5""" , """language""" )
lowercase_ = val
# read in qv biases
read_in_q_v_bias(UpperCAmelCase__ , UpperCAmelCase__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
lowercase_ = load_demo_image()
lowercase_ = """What is unusual about this image?"""
# create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ )
    processor = InstructBlipProcessor(
        image_processor=image_processor , tokenizer=tokenizer , qformer_tokenizer=UpperCAmelCase__ , )
    inputs = processor(images=image , text=prompt , return_tensors="""pt""" ).to(hf_model_device )
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(lavis_device )
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , pixel_values )
    original_model.to(lavis_device )
    hf_model.to(hf_model_device )
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
            logits = hf_model(**inputs ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
            label_input_ids = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(hf_model_device )
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
            logits = hf_model(**inputs , labels=labels ).logits
    print("""First values of original logits:""" , original_logits[0, :3, :3] )
    print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if """vicuna""" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , logits , atol=atol )
print("""Looks ok!""" )
print("""Generating with original model...""" )
lowercase_ = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
lowercase_ = hf_model.generate(
**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase_ = 2
print("""Original generation:""" , UpperCAmelCase__ )
lowercase_ = processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
lowercase_ = [text.strip() for text in output_text]
print("""HF generation:""" , UpperCAmelCase__ )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
a = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
a = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
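# Example invocation (a sketch, not from the original file: the script name is
# an assumption, and LAVIS plus the original checkpoints must be available
# locally for the conversion to run):
#
#   python convert_instructblip_checkpoint.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub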
| 412
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''bloom'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_hidden_layers''': '''n_layer''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__( self , vocab_size=2_5_0_8_8_0 , hidden_size=6_4 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig( OnnxConfigWithPast ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.12''' )
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ) -> None:
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' , inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        """simple docstring"""
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self._config.n_head
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-3
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizer" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 1_3
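# A runnable sketch (illustrative sizes, not from this file) of the inverted
# past_key_values layout that generate_dummy_inputs builds above: BLOOM keys
# are (batch * n_head, head_dim, past_len) while values are
# (batch * n_head, past_len, head_dim).
if is_torch_available():
    import torch

    _batch, _n_head, _seqlen = 2, 8, 3  # assumed demo sizes
    _head_dim = 6_4 // _n_head  # hidden_size // num_attention_heads
    _past_len = _seqlen + 2  # dummy inputs use a longer past, as above
    _key = torch.zeros(_batch * _n_head, _head_dim, _past_len)
    _value = torch.zeros(_batch * _n_head, _past_len, _head_dim)
    assert _key.shape[1:] == _value.shape[:0:-1]  # key axes = reversed value axes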
| 708
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 462
| 0
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__( self : Any , backbone_config: Optional[Dict] = None , feature_size: int = 256 , mask_feature_size: int = 256 , hidden_dim: int = 256 , encoder_feedforward_dim: int = 1024 , activation_function: str = "relu" , encoder_layers: int = 6 , decoder_layers: int = 10 , num_attention_heads: int = 8 , dropout: float = 0.0 , dim_feedforward: int = 2048 , pre_norm: bool = False , enforce_input_projection: bool = False , common_stride: int = 4 , ignore_value: int = 255 , num_queries: int = 100 , no_object_weight: float = 0.1 , class_weight: float = 2.0 , mask_weight: float = 5.0 , dice_weight: float = 5.0 , train_num_points: int = 12544 , oversample_ratio: float = 3.0 , importance_sample_ratio: float = 0.75 , init_std: float = 0.02 , init_xavier_std: float = 1.0 , use_auxiliary_loss: bool = True , feature_strides: List[int] = [4, 8, 16, 32] , output_auxiliary_logits: bool = None , **kwargs: int , ):
        """simple docstring"""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                F"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config( cls : List[str] , backbone_config : PretrainedConfig , **kwargs : Optional[int]):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self : Tuple):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
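# A minimal usage sketch of the config above (commented out since it needs a
# transformers install; num_queries=200 is an arbitrary illustrative value):
#
#   from transformers import SwinConfig
#   backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = Mask2FormerConfig.from_backbone_config(backbone, num_queries=200)
#   assert config.to_dict()["backbone_config"]["model_type"] == "swin"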
| 77
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCamelCase_ :
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = 3_84
_UpperCamelCase = 2
_UpperCamelCase = 4
_UpperCamelCase = 37
_UpperCamelCase = "gelu"
_UpperCamelCase = 0.1
_UpperCamelCase = 0.1
_UpperCamelCase = 5_12
_UpperCamelCase = 16
_UpperCamelCase = 2
_UpperCamelCase = 0.02
_UpperCamelCase = 3
_UpperCamelCase = 4
_UpperCamelCase = 1_28
_UpperCamelCase = 2
_UpperCamelCase = 9
_UpperCamelCase = 1
_UpperCamelCase = None
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = TFConvBertModel(config=lowerCamelCase_ )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = TFConvBertForMaskedLM(config=lowerCamelCase_ )
_UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
_UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
_UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFConvBertForTokenClassification(config=lowerCamelCase_ )
_UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
"""simple docstring"""
_UpperCamelCase = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
_UpperCamelCase = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowercase , lowercase , unittest.TestCase ):
__lowercase : int = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowercase : int = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowercase : Any = False
__lowercase : List[str] = False
__lowercase : List[Any] = False
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFConvBertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def lowercase ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def lowercase ( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def lowercase ( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = True
_UpperCamelCase = True
if hasattr(lowerCamelCase_ , "use_cache" ):
_UpperCamelCase = True
_UpperCamelCase = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_UpperCamelCase = getattr(self.model_tester , "key_length" , lowerCamelCase_ )
for model_class in self.all_model_classes:
_UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
_UpperCamelCase = os.path.join(lowerCamelCase_ , "saved_model" , "1" )
_UpperCamelCase = tf.keras.models.load_model(lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ )
if self.is_encoder_decoder:
_UpperCamelCase = outputs["encoder_hidden_states"]
_UpperCamelCase = outputs["encoder_attentions"]
else:
_UpperCamelCase = outputs["hidden_states"]
_UpperCamelCase = outputs["attentions"]
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
_UpperCamelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(lowerCamelCase_ )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = True
_UpperCamelCase = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_UpperCamelCase = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_UpperCamelCase = getattr(self.model_tester , "key_length" , lowerCamelCase_ )
_UpperCamelCase = getattr(self.model_tester , "key_length" , lowerCamelCase_ )
def check_decoder_attentions_output(lowerCamelCase_ ):
_UpperCamelCase = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
_UpperCamelCase = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_ ):
_UpperCamelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
_UpperCamelCase = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase = True
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = model(lowerCamelCase_ )[0]
_UpperCamelCase = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
_UpperCamelCase = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 147
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ):
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
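# A runnable sketch of the clone pattern implemented above, on a toy
# dataclass (names below are illustrative, not part of the original file):
from dataclasses import field

@dataclass
class _DemoConfig:
    steps: int = 1
    tags: list = field(default_factory=list)

    def clone(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

_cfg = _DemoConfig(tags=["a"])
_dup = _cfg.clone()
_dup.tags.append("b")
assert _cfg.tags == ["a"]  # deepcopy keeps the original's mutable fields untouched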
| 705
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 1_00 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float ) -> float:
        return math.sin(10 * x )
    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100_000:
        print(F"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
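    # Sanity check (a sketch): f(x) = x on [0, 1] is already a straight line,
    # so the polyline approximation above recovers the exact length sqrt(2).
    assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-12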
| 212
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 681
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    """vocab_size""": len(tokenizer),
    """scale_attn_by_inverse_layer_idx""": True,
    """reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
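# Example invocation (a sketch; the flag names assume InitializationArguments
# exposes config_name, tokenizer_name, model_name and push_to_hub, which is
# not shown in this file):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model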
| 681
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__(self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode(self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class(self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
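# A minimal usage sketch (commented out since it needs a transformers install
# and downloads the "facebook/flava-full" checkpoint):
#
#   from PIL import Image
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                      return_tensors="pt", padding=True)
#   sorted(inputs.keys())  # attention_mask, input_ids, pixel_values, ...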
| 229
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args( unknown_args ) ->dict:
    """simple docstring"""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main( ) ->None:
    """simple docstring"""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
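# Example invocations of the resulting CLI (a sketch; "./my_dataset" is a
# placeholder path):
#
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_info --all_configs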
| 229
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__magic_name__ : int = logging.get_logger(__name__)
class UpperCamelCase__ ( __snake_case ):
"""simple docstring"""
UpperCAmelCase__ = ['pixel_values']
def __init__( self : int , __A : str = True , __A : str = None , __A : List[Any] = PILImageResampling.BILINEAR , __A : Optional[Any] = True , __A : Union[str, Any] = 1 / 2_5_5 , __A : str = True , __A : Dict = None , __A : Tuple = True , **__A : Dict , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
_lowercase = size if size is not None else {"shortest_edge": 2_2_4}
_lowercase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowercase = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
_lowercase = get_size_dict(__UpperCamelCase , param_name="crop_size" )
_lowercase = do_resize
_lowercase = size
_lowercase = resample
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_center_crop
_lowercase = crop_size
_lowercase = do_flip_channel_order
def snake_case ( self : List[Any] , __A : Optional[int] , __A : List[Any] , __A : Optional[Any] = PIL.Image.BILINEAR , __A : Optional[int] = None , **__A : Optional[Any] , ):
"""simple docstring"""
_lowercase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowercase = get_resize_output_image_size(__UpperCamelCase , size=size["shortest_edge"] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def snake_case ( self : Dict , __A : Optional[int] , __A : Optional[int] , __A : List[Any] = None , **__A : Any , ):
"""simple docstring"""
_lowercase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def snake_case ( self : Any , __A : Optional[Any] , __A : Any , __A : Any = None , **__A : Tuple , ):
"""simple docstring"""
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def snake_case ( self : Tuple , __A : str , __A : int = None ):
"""simple docstring"""
return flip_channel_order(__UpperCamelCase , data_format=__UpperCamelCase )
def snake_case ( self : str , __A : Optional[int] , __A : str = None , __A : List[str] = None , __A : List[Any] = None , __A : str = None , __A : Union[str, Any] = None , __A : List[Any] = None , __A : str = None , __A : Dict = None , __A : Dict = None , __A : Optional[Any] = ChannelDimension.FIRST , **__A : Union[str, Any] , ):
"""simple docstring"""
_lowercase = do_resize if do_resize is not None else self.do_resize
_lowercase = resample if resample is not None else self.resample
_lowercase = do_rescale if do_rescale is not None else self.do_rescale
_lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowercase = size if size is not None else self.size
_lowercase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_lowercase = crop_size if crop_size is not None else self.crop_size
_lowercase = get_size_dict(__UpperCamelCase , param_name="crop_size" )
_lowercase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_lowercase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_lowercase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_lowercase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_lowercase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowercase = [self.flip_channel_order(image=__UpperCamelCase ) for image in images]
_lowercase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_lowercase = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
def snake_case ( self : str , __A : Any , __A : Tuple = None ):
"""simple docstring"""
_lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__UpperCamelCase ):
_lowercase = target_sizes.numpy()
_lowercase = []
for idx in range(len(__UpperCamelCase ) ):
_lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__UpperCamelCase )
_lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCamelCase )
else:
_lowercase = logits.argmax(dim=1 )
_lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
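# A minimal usage sketch (commented out; the class above is obfuscated here,
# so "MobileViTImageProcessor" — its name in transformers — is an assumption):
#
#   from PIL import Image
#   image_processor = MobileViTImageProcessor(size={"shortest_edge": 224})
#   batch = image_processor(images=Image.open("img.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # (1, 3, 256, 256) after the 256x256 center crop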
| 497
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 187
| 0
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs: dict ) -> int:
    """simple docstring"""
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards: int , max_num_jobs: int ) -> List[range]:
    """simple docstring"""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def _split_gen_kwargs( gen_kwargs: dict , max_num_jobs: int ) -> List[dict]:
    """simple docstring"""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs( gen_kwargs_list: List[dict] ) -> dict:
    """simple docstring"""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng: np.random.Generator , gen_kwargs: dict ) -> dict:
    """simple docstring"""
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
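# Worked examples (a sketch) of the helpers above: 5 shards over 2 jobs give
# the first group the remainder shard, and splitting gen_kwargs with one
# 4-element list over 2 jobs halves that list while copying scalar values.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _split_gen_kwargs({"files": ["a", "b", "c", "d"], "seed": 42}, max_num_jobs=2) == [
    {"files": ["a", "b"], "seed": 42},
    {"files": ["c", "d"], "seed": 42},
]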
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token , num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names , output_dir , token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports( artifact_names , output_dir , token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f"""{artifact_name}.zip""" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
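# Example usage (a sketch; assumes a GitHub token with permission to read
# workflow artifacts, and that the named artifacts exist on the last run):
#
#   import os
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"],
#       output_dir="./ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )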
| 620
| 0
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __snake_case :
"""simple docstring"""
def __init__( self :Dict , UpperCamelCase__ :Any , UpperCamelCase__ :int = 13 , UpperCamelCase__ :int = 64 , UpperCamelCase__ :int = 2 , UpperCamelCase__ :int = 3 , UpperCamelCase__ :int = 3 , UpperCamelCase__ :bool = True , UpperCamelCase__ :bool = True , UpperCamelCase__ :int = 128 , UpperCamelCase__ :Tuple=[16, 32, 64, 128] , UpperCamelCase__ :int = 7 , UpperCamelCase__ :int = 4 , UpperCamelCase__ :int = 37 , UpperCamelCase__ :str = "gelu" , UpperCamelCase__ :float = 0.1 , UpperCamelCase__ :float = 0.1 , UpperCamelCase__ :int = 10 , UpperCamelCase__ :float = 0.02 , UpperCamelCase__ :int = 2 , UpperCamelCase__ :int = 1 , UpperCamelCase__ :int = 128 , UpperCamelCase__ :List[int] = [2, 2, 2, 2] , UpperCamelCase__ :int = 2 , UpperCamelCase__ :int = 2 , ):
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = encoder_stride
_a = num_attention_outputs
_a = embed_dim
_a = embed_dim + 1
_a = resolution
_a = depths
_a = hidden_sizes
_a = dim
_a = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE_ ( self :int , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :Optional[int] ):
_a = TFEfficientFormerModel(config=UpperCamelCase__ )
_a = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :str , UpperCamelCase__ :List[Any] ):
_a = self.type_sequence_label_size
_a = TFEfficientFormerForImageClassification(UpperCamelCase__ )
_a = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = TFEfficientFormerForImageClassification(UpperCamelCase__ )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = TFEfficientFormerModelTester(self )
_a = ConfigTester(
self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
pass
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(UpperCamelCase__ )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :str ):
def check_hidden_states_output(UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :int , UpperCamelCase__ :Dict ):
_a = model_class(UpperCamelCase__ )
_a = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
_a = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
_a = seq_length * self.model_tester.chunk_length
else:
_a = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_a = outputs.decoder_hidden_states
                self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
_a = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
_a = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :str , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :List[str] , UpperCamelCase__ :Tuple=False ):
_a = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self :int ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFEfficientFormerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
_a = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
_a = getattr(self.model_tester , "key_length" , UpperCamelCase__ )
_a = getattr(self.model_tester , "chunk_length" , UpperCamelCase__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
_a = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(UpperCamelCase__ )
_a = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(UpperCamelCase__ )
_a = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_a = model_class(UpperCamelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_a = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_a = model(UpperCamelCase__ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self :str ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=UpperCamelCase__ , return_tensors="tf" )
# forward pass
_a = model(**UpperCamelCase__ , training=UpperCamelCase__ )
# verify the logits
_a = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_a = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=UpperCamelCase__ , return_tensors="tf" )
# forward pass
_a = model(**UpperCamelCase__ , training=UpperCamelCase__ )
# verify the logits
_a = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_a = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
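# Hedged usage sketch mirroring the integration tests above (the checkpoint name
# is taken from this file; the logits depend on the actual weights):
#   processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   logits = model(**processor(images=prepare_img(), return_tensors="tf"), training=False).logits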
| 388
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """simple docstring"""
    blocks = list(range(0, num_blocks) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks) )
def get_device_map(n_layers, devices):
    """simple docstring"""
    layers = list(range(n_layers) )
    n_blocks = int(ceil(n_layers / len(devices) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list) )
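# Worked sketch of get_device_map (illustrative values):
#   get_device_map(n_layers=8, devices=[0, 1])    -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
#   get_device_map(n_layers=8, devices=[0, 1, 2]) -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}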
| 388
| 1
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class a_ :
@staticmethod
def A__ ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
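# Hedged usage sketch: the helper fingerprints a PIL image for test comparisons.
#   hashimage(Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"))
#   # -> 32-character hex digest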
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a_ ( unittest.TestCase ):
lowercase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = DepthEstimationPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , _SCREAMING_SNAKE_CASE )
import datasets
UpperCamelCase = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
UpperCamelCase = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , _SCREAMING_SNAKE_CASE , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def A__ ( self ) -> Any:
"""simple docstring"""
pass
@slow
@require_torch
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = """Intel/dpt-large"""
UpperCamelCase = pipeline("""depth-estimation""" , model=_SCREAMING_SNAKE_CASE )
UpperCamelCase = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
UpperCamelCase = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 35
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
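# Hedged usage sketch (the script filename and output path are placeholders):
#   $ python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations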
| 35
| 1
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowercase_ :
def __init__( self , __A , __A=14 , __A=7 , __A=True , __A=True , __A=False , __A=True , __A=99 , __A=32 , __A=4 , __A=4 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Tuple =parent
SCREAMING_SNAKE_CASE_ : List[str] =batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] =seq_length
SCREAMING_SNAKE_CASE_ : int =is_training
SCREAMING_SNAKE_CASE_ : List[str] =use_input_mask
SCREAMING_SNAKE_CASE_ : Any =use_token_type_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] =use_labels
SCREAMING_SNAKE_CASE_ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE_ : Any =hidden_size
SCREAMING_SNAKE_CASE_ : int =rotary_dim
SCREAMING_SNAKE_CASE_ : Dict =num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] =num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE_ : Dict =hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str =max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] =None
SCREAMING_SNAKE_CASE_ : str =vocab_size - 1
SCREAMING_SNAKE_CASE_ : Dict =vocab_size - 1
SCREAMING_SNAKE_CASE_ : List[str] =vocab_size - 1
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : str =GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def _snake_case ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Any =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int =config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict ={'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _snake_case ( self , __A , __A , __A , __A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Tuple =20
SCREAMING_SNAKE_CASE_ : Optional[int] =model_class_name(__A )
SCREAMING_SNAKE_CASE_ : List[Any] =model.init_cache(input_ids.shape[0] , __A )
SCREAMING_SNAKE_CASE_ : Any =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
SCREAMING_SNAKE_CASE_ : List[str] =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE_ : str =model(
input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , )
SCREAMING_SNAKE_CASE_ : Any =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE_ : Dict =model(
input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =model(__A )
SCREAMING_SNAKE_CASE_ : Optional[int] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def _snake_case ( self , __A , __A , __A , __A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : str =20
SCREAMING_SNAKE_CASE_ : List[str] =model_class_name(__A )
SCREAMING_SNAKE_CASE_ : int =jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE_ : List[Any] =model.init_cache(input_ids.shape[0] , __A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE_ : str =model(
input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , )
SCREAMING_SNAKE_CASE_ : Dict =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , )
SCREAMING_SNAKE_CASE_ : List[str] =model(__A , attention_mask=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class lowercase_ ( A , A , unittest.TestCase ):
__lowerCamelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCamelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _snake_case ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ : List[str] =FlaxGPTJModelTester(self )
def _snake_case ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__A , __A , __A , __A )
def _snake_case ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__A , __A , __A , __A )
@tooslow
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : List[Any] =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE_ : Tuple =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=__A , truncation=__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
SCREAMING_SNAKE_CASE_ : Dict =False
SCREAMING_SNAKE_CASE_ : Tuple =model.config.eos_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] =jax.jit(model.generate )
SCREAMING_SNAKE_CASE_ : int =jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE_ : Tuple =tokenizer.batch_decode(__A , skip_special_tokens=__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =[
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(__A , __A )
@is_pt_flax_cross_test
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE_ : List[Any] =self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE_ : str ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_ : int =model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_ : Optional[Any] =getattr(__A , __A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict =pt_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE_ : Any =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
SCREAMING_SNAKE_CASE_ : Dict =0
SCREAMING_SNAKE_CASE_ : Union[str, Any] =1
SCREAMING_SNAKE_CASE_ : List[Any] =0
SCREAMING_SNAKE_CASE_ : Any =1
SCREAMING_SNAKE_CASE_ : int =pt_model_class(__A ).eval()
SCREAMING_SNAKE_CASE_ : str =model_class(__A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Optional[Any] =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A )
SCREAMING_SNAKE_CASE_ : int =fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] =pt_model(**__A ).to_tuple()
SCREAMING_SNAKE_CASE_ : Optional[Any] =fx_model(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__A )
SCREAMING_SNAKE_CASE_ : Any =model_class.from_pretrained(__A , from_pt=__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =fx_model_loaded(**__A ).to_tuple()
self.assertEqual(
len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE_ : str =self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE_ : Dict ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_ : Optional[int] =model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_ : int =getattr(__A , __A )
SCREAMING_SNAKE_CASE_ : List[str] =pt_model_class(__A ).eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] =model_class(__A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =load_flax_weights_in_pytorch_model(__A , fx_model.params )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] =pt_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE_ : int =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
SCREAMING_SNAKE_CASE_ : Tuple =0
SCREAMING_SNAKE_CASE_ : Union[str, Any] =1
SCREAMING_SNAKE_CASE_ : Union[str, Any] =0
SCREAMING_SNAKE_CASE_ : int =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] =pt_model(**__A ).to_tuple()
SCREAMING_SNAKE_CASE_ : Dict =fx_model(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__A )
SCREAMING_SNAKE_CASE_ : Dict =pt_model_class.from_pretrained(__A , from_flax=__A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict =pt_model_loaded(**__A ).to_tuple()
self.assertEqual(
len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def _snake_case ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
SCREAMING_SNAKE_CASE_ : Dict =model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
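# Hedged sketch of the PT<->Flax weight round trip the tests above exercise
# (call shapes follow the test bodies):
#   fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
#   pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)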
| 443
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
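# Minimal sketch of the lazy-import pattern above (assumed _LazyModule behavior):
# the submodule is only imported when one of the names listed in
# `_import_structure` is first accessed.
#   import transformers
#   config = transformers.FalconConfig()  # triggers the import of configuration_falcon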
| 443
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Any = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
@add_end_docstrings(
lowerCAmelCase__ , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class __lowerCAmelCase( lowerCAmelCase__ ):
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : GenericTensor ):
"""simple docstring"""
if self.framework == "tf":
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
SCREAMING_SNAKE_CASE_ :List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=SCREAMING_SNAKE_CASE )
else:
raise ValueError('Unsupported framework' )
return masked_index
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : GenericTensor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = self.get_masked_index(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : GenericTensor ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(SCREAMING_SNAKE_CASE )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str]=None , **SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if return_tensors is None:
SCREAMING_SNAKE_CASE_ :Dict = self.framework
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(SCREAMING_SNAKE_CASE )
return model_inputs
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = model_inputs['input_ids']
return model_outputs
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict=5 , SCREAMING_SNAKE_CASE : Any=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
SCREAMING_SNAKE_CASE_ :Tuple = target_ids.shape[0]
SCREAMING_SNAKE_CASE_ :List[Any] = model_outputs['input_ids'][0]
SCREAMING_SNAKE_CASE_ :List[str] = model_outputs['logits']
if self.framework == "tf":
SCREAMING_SNAKE_CASE_ :Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
SCREAMING_SNAKE_CASE_ :Dict = outputs.numpy()
SCREAMING_SNAKE_CASE_ :Any = outputs[0, masked_index, :]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = stable_softmax(SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tf.gather_nd(tf.squeeze(SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
SCREAMING_SNAKE_CASE_ :List[str] = tf.expand_dims(SCREAMING_SNAKE_CASE , 0 )
SCREAMING_SNAKE_CASE_ :Optional[Any] = tf.math.top_k(SCREAMING_SNAKE_CASE , k=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = topk.values.numpy(), topk.indices.numpy()
else:
SCREAMING_SNAKE_CASE_ :List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
SCREAMING_SNAKE_CASE_ :str = outputs[0, masked_index, :]
SCREAMING_SNAKE_CASE_ :int = logits.softmax(dim=-1 )
if target_ids is not None:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = probs[..., target_ids]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = probs.topk(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = []
SCREAMING_SNAKE_CASE_ :str = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
SCREAMING_SNAKE_CASE_ :Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
SCREAMING_SNAKE_CASE_ :Any = input_ids.numpy().copy()
if target_ids is not None:
SCREAMING_SNAKE_CASE_ :Optional[Any] = target_ids[p].tolist()
SCREAMING_SNAKE_CASE_ :int = p
# Filter padding out:
SCREAMING_SNAKE_CASE_ :Optional[int] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
SCREAMING_SNAKE_CASE_ :Any = self.tokenizer.decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(SCREAMING_SNAKE_CASE )
result.append(SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int=None ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [targets]
try:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
SCREAMING_SNAKE_CASE_ :List[Any] = {}
SCREAMING_SNAKE_CASE_ :str = []
for target in targets:
SCREAMING_SNAKE_CASE_ :Optional[int] = vocab.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if id_ is None:
SCREAMING_SNAKE_CASE_ :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , max_length=1 , truncation=SCREAMING_SNAKE_CASE , )['input_ids']
if len(SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
'We cannot replace it with anything meaningful, ignoring it' )
continue
SCREAMING_SNAKE_CASE_ :Union[str, Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
SCREAMING_SNAKE_CASE_ :List[str] = list(set(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('At least one target must be provided when passed.' )
SCREAMING_SNAKE_CASE_ :Optional[int] = np.array(SCREAMING_SNAKE_CASE )
return target_ids
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[Any] = {}
if targets is not None:
SCREAMING_SNAKE_CASE_ :Dict = self.get_target_ids(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = target_ids
if top_k is not None:
SCREAMING_SNAKE_CASE_ :str = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Dict = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
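# Hedged usage sketch of the fill-mask pipeline above (the model name is
# illustrative):
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="distilroberta-base")
#   fill("Paris is the <mask> of France.", top_k=2)
#   # -> list of {"score", "token", "token_str", "sequence"} dicts, best guesses first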
| 233
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , "Tatoeba directory does not exist." )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : int ) -> List[str]:
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def lowercase_ ( self : Tuple ) -> int:
self.resolver.convert_models(['''heb-eng'''] )
@slow
def lowercase_ ( self : int ) -> List[Any]:
        card, mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 493
|
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
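# Hedged usage sketch of the top-level API re-exported above:
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)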
| 156
| 0
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image):
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
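# Worked sketch (illustrative): for a 2x1 grayscale image with pixel values
# [100, 200], mean == 150, so the thresholded pixels become [0, 255].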
| 714
|
"""simple docstring"""
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
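# Hedged usage sketch:
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)   # -> 54 (sixth-smallest element)
#   quick_select([2, 4, 5, 7, 899, 54, 32], 50)  # -> None (index out of range)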
| 406
| 0
|
from manim import *
class a ( lowercase__ ):
"""simple docstring"""
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
__UpperCAmelCase : List[Any] = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase : List[Any] = Rectangle(height=0.25 , width=0.25 )
__UpperCAmelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
__UpperCAmelCase : str = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[Any] = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : List[str] = Text("""CPU""" , font_size=24 )
__UpperCAmelCase : List[Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
__UpperCAmelCase : Tuple = [mem.copy() for i in range(4 )]
__UpperCAmelCase : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = Text("""GPU""" , font_size=24 )
__UpperCAmelCase : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
__UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = Text("""Model""" , font_size=24 )
__UpperCAmelCase : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : str = []
__UpperCAmelCase : List[Any] = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
__UpperCAmelCase : List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
__UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Optional[Any] = Text("""Loaded Checkpoint""" , font_size=24 )
__UpperCAmelCase : List[Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[int] = []
for i, rect in enumerate(__lowercase ):
__UpperCAmelCase : Dict = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
__UpperCAmelCase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
__UpperCAmelCase : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
__UpperCAmelCase : List[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
__UpperCAmelCase : Union[str, Any] = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__UpperCAmelCase : List[str] = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : List[str] = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Tuple = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
__UpperCAmelCase : Tuple = Text("""Disk""" , font_size=24 )
__UpperCAmelCase : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
__UpperCAmelCase : List[Any] = []
for i, rect in enumerate(__lowercase ):
__UpperCAmelCase : Any = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
__UpperCAmelCase : Tuple = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait()
| 63
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
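# Hedged usage sketch (assuming the `transformers-cli` console entry point is installed):
#   $ transformers-cli env
#   $ transformers-cli download bert-base-uncased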
| 63
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a =logging.get_logger(__name__)
class __UpperCAmelCase ( __lowerCAmelCase ):
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
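# Hedged usage sketch: the shim above exists only for backwards compatibility.
#   extractor = VideoMAEFeatureExtractor()  # warns, then behaves like VideoMAEImageProcessor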
| 132
| 0
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        """Filename of the cached gaussian-noise array for a given seed and shape."""
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        """Releases memory after each test."""
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        """Loads a cached latent tensor from the Hub for the given seed and shape."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        """Loads the Flax UNet (the `bf16` weights branch when half precision is requested)."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        """Loads cached text-encoder hidden states from the Hub."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
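# Standalone sketch of what the tests above exercise: load the bf16 Flax UNet and run a
# single denoising step. Random arrays stand in for the cached Hub fixtures, so only the
# shapes and dtypes below are meaningful; the checkpoint name is the one used in the tests.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp
    from diffusers import FlaxUNet2DConditionModel

    model, params = FlaxUNet2DConditionModel.from_pretrained(
        "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16, revision="bf16"
    )
    rng = jax.random.PRNGKey(0)
    latents = jax.random.normal(rng, (4, 4, 64, 64), dtype=jnp.bfloat16)
    encoder_hidden_states = jax.random.normal(rng, (4, 77, 768), dtype=jnp.bfloat16)
    sample = model.apply(
        {"params": params},
        latents,
        jnp.array(4, dtype=jnp.int32),
        encoder_hidden_states=encoder_hidden_states,
    ).sample
    assert sample.shape == latents.shape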
| 293
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__a: Union[str, Any] = logging.get_logger(__name__)
__a: str = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a: Dict = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__a: Union[str, Any] = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BlenderbotSmall does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
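# Usage sketch (assumes network access to the checkpoint referenced in the maps above):
# encode and decode a sentence with the byte-level BPE backend.
if __name__ == "__main__":
    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    ids = tokenizer("sam is a great name. it means 'listener'.").input_ids
    print(ids)
    print(tokenizer.decode(ids))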
| 108
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
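    def test_readapt_merges(self):
        # Added sketch illustrating the BPE merge order behind the expected pieces above:
        # "r e" -> "re" and "a d" -> "ad" fire first, then "a p" -> "ap",
        # "ap t</w>" -> "apt</w>", and "ad apt</w>" -> "adapt</w>"; "re" has no further
        # merge, so it surfaces with the "@@" continuation marker.
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        self.assertListEqual(tokenizer.tokenize("readapt"), ["re@@", "adapt"])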
| 718
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder mapping noisy spectrogram frames back to spectrograms."""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None,
                encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1)
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # Two parallel input projections implement the gated activation: one passes
        # through GELU and gates the other, linear branch.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
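# The forward above is plain RMSNorm: y = weight * x / sqrt(mean(x^2) + eps), with the
# reduction done in fp32. A quick numerical cross-check of that identity (a sketch,
# only run when this module is executed directly):
if __name__ == "__main__":
    _x = torch.randn(2, 8)
    _norm = T5LayerNorm(8)
    _manual = _norm.weight * _x * torch.rsqrt(_x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(_norm(_x), _manual, atol=1e-6)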
class NewGELUActivation(nn.Module):
    """The tanh approximation of the Gaussian error function, as used in Google BERT and GPT."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
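# The constant 0.044715 and the sqrt(2/pi) factor make this the tanh approximation of
# GELU; recent torch exposes the same curve as F.gelu(x, approximate="tanh"). Sanity
# check (a sketch; assumes torch >= 1.12 for the `approximate` keyword):
if __name__ == "__main__":
    _x = torch.linspace(-3.0, 3.0, 7)
    _ours = NewGELUActivation()(_x)
    _ref = torch.nn.functional.gelu(_x, approximate="tanh")
    assert torch.allclose(_ours, _ref, atol=1e-6)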
class T5FiLMLayer(nn.Module):
    """Feature-wise linear modulation: predicts a per-channel scale and shift from the conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
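# FiLM in one line: the conditioning embedding is projected to per-channel (scale, shift)
# pairs and applied as x * (1 + scale) + shift, so a zero embedding leaves x untouched
# (the projection has no bias). Shape sketch, with dimensions matching the d_model=768
# default above:
if __name__ == "__main__":
    _film = T5FiLMLayer(in_features=768 * 4, out_features=768)
    _x = torch.randn(2, 256, 768)
    _cond = torch.randn(2, 1, 768 * 4)
    assert _film(_x, _cond).shape == _x.shape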
| 366
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'''


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 561
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place by sweeping alternately backward and forward, stopping
    early once a full double pass makes no swaps.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, -2])
    [-4, -2, 0, 1, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
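# Why the extra backward sweep beats plain bubble sort: a small element stranded at the
# end of the list ("a turtle") moves only one position per forward pass, but the
# backward pass carries it to the front in a single sweep. Worst case stays O(n^2);
# already-sorted input exits in O(n) thanks to the `swapped` flag. Demonstration
# (a sketch):
if __name__ == "__main__":
    print(cocktail_shaker_sort([2, 3, 4, 5, 1]))  # the turtle `1` surfaces in one outer iteration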
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = input("Enter numbers separated by a comma:\n").strip()
_A = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 290
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case : Optional[int] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(input_shape, rng=None):
    attn_mask = ids_tensor(input_shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
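# The invariant the mixin tests above repeat, in isolation: generation must produce
# identical sequences with and without `jax.jit`. Standalone sketch against the tiny
# checkpoint used in the integration test (assumes network access and that the tiny
# model generates without further config tweaks):
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
    model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
    input_ids = tokenizer("Hello world", return_tensors="np").input_ids
    eager = model.generate(input_ids).sequences
    jitted = jit(model.generate)(input_ids).sequences
    assert eager.tolist() == jitted.tolist()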
| 716
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.4_14 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase )
_SCREAMING_SNAKE_CASE = inputs["prompt"]
_SCREAMING_SNAKE_CASE = inputs["generator"]
_SCREAMING_SNAKE_CASE = inputs["num_inference_steps"]
_SCREAMING_SNAKE_CASE = inputs["output_type"]
if "image" in inputs:
_SCREAMING_SNAKE_CASE = inputs["image"]
else:
_SCREAMING_SNAKE_CASE = None
if "mask_image" in inputs:
_SCREAMING_SNAKE_CASE = inputs["mask_image"]
else:
_SCREAMING_SNAKE_CASE = None
if "original_image" in inputs:
_SCREAMING_SNAKE_CASE = inputs["original_image"]
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = pipe.encode_prompt(UpperCamelCase )
# inputs with prompt converted to embeddings
_SCREAMING_SNAKE_CASE = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
_SCREAMING_SNAKE_CASE = image
if mask_image is not None:
_SCREAMING_SNAKE_CASE = mask_image
if original_image is not None:
_SCREAMING_SNAKE_CASE = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe(**UpperCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(UpperCamelCase )
pipe_loaded.to(UpperCamelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase , UpperCamelCase ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase )
_SCREAMING_SNAKE_CASE = inputs["generator"]
_SCREAMING_SNAKE_CASE = inputs["num_inference_steps"]
_SCREAMING_SNAKE_CASE = inputs["output_type"]
# inputs with prompt converted to embeddings
_SCREAMING_SNAKE_CASE = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
_SCREAMING_SNAKE_CASE = image
if mask_image is not None:
_SCREAMING_SNAKE_CASE = mask_image
if original_image is not None:
_SCREAMING_SNAKE_CASE = original_image
_SCREAMING_SNAKE_CASE = pipe_loaded(**UpperCamelCase )[0]
_SCREAMING_SNAKE_CASE = np.abs(to_np(UpperCamelCase ) - to_np(UpperCamelCase ) ).max()
self.assertLess(UpperCamelCase , 1e-4 )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe(**UpperCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(UpperCamelCase )
pipe_loaded.to(UpperCamelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe_loaded(**UpperCamelCase )[0]
_SCREAMING_SNAKE_CASE = np.abs(to_np(UpperCamelCase ) - to_np(UpperCamelCase ) ).max()
self.assertLess(UpperCamelCase , 1e-4 )
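    # Both save/load tests above reduce to one invariant: `save_pretrained` followed by
    # `from_pretrained` must reproduce outputs to within ~1e-4. Condensed sketch of that
    # round-trip (an outline; `pipe`, `inputs`, and `self.pipeline_class` come from the
    # concrete pipeline test subclass):
    #
    #     out = pipe(**inputs)[0]
    #     with tempfile.TemporaryDirectory() as tmpdir:
    #         pipe.save_pretrained(tmpdir)
    #         reloaded = self.pipeline_class.from_pretrained(tmpdir).to(torch_device)
    #         assert np.abs(to_np(out) - to_np(reloaded(**inputs)[0])).max() < 1e-4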
| 493
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = """RegNetConfig"""
# Base docstring
_lowerCAmelCase = """facebook/regnet-y-040"""
_lowerCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCAmelCase = """facebook/regnet-y-040"""
_lowerCAmelCase = """tabby, tabby cat"""
_lowerCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ , A__ , A__ = 3 , A__ = 1 , A__ = 1 , A__ = "relu" , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: Optional[int] = nn.Convad(
A__ , A__ , kernel_size=A__ , stride=A__ , padding=kernel_size // 2 , groups=A__ , bias=A__ , )
UpperCAmelCase_: Any = nn.BatchNormad(A__ )
UpperCAmelCase_: Tuple = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = self.convolution(A__ )
UpperCAmelCase_: List[str] = self.normalization(A__ )
UpperCAmelCase_: Dict = self.activation(A__ )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: List[str] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
UpperCAmelCase_: List[str] = config.num_channels
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: int = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
UpperCAmelCase_: int = self.embedder(A__ )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ , A__ , A__ = 2 ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: Tuple = nn.Convad(A__ , A__ , kernel_size=1 , stride=A__ , bias=A__ )
UpperCAmelCase_: Any = nn.BatchNormad(A__ )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = self.convolution(A__ )
UpperCAmelCase_: List[str] = self.normalization(A__ )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ , A__ ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: List[str] = nn.AdaptiveAvgPoolad((1, 1) )
UpperCAmelCase_: Optional[Any] = nn.Sequential(
nn.Convad(A__ , A__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(A__ , A__ , kernel_size=1 ) , nn.Sigmoid() , )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Any = self.pooler(A__ )
UpperCAmelCase_: str = self.attention(A__ )
UpperCAmelCase_: Tuple = hidden_state * attention
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ , A__ , A__ , A__ = 1 ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: Any = in_channels != out_channels or stride != 1
UpperCAmelCase_: List[Any] = max(1 , out_channels // config.groups_width )
UpperCAmelCase_: Dict = (
RegNetShortCut(A__ , A__ , stride=A__ ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase_: Tuple = nn.Sequential(
RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(A__ , A__ , stride=A__ , groups=A__ , activation=config.hidden_act ) , RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=A__ ) , )
UpperCAmelCase_: Dict = ACTaFN[config.hidden_act]
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Any = hidden_state
UpperCAmelCase_: Optional[Any] = self.layer(A__ )
UpperCAmelCase_: str = self.shortcut(A__ )
hidden_state += residual
UpperCAmelCase_: str = self.activation(A__ )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ , A__ , A__ , A__ = 1 ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: List[Any] = in_channels != out_channels or stride != 1
UpperCAmelCase_: Dict = max(1 , out_channels // config.groups_width )
UpperCAmelCase_: Dict = (
RegNetShortCut(A__ , A__ , stride=A__ ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase_: Union[str, Any] = nn.Sequential(
RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(A__ , A__ , stride=A__ , groups=A__ , activation=config.hidden_act ) , RegNetSELayer(A__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(A__ , A__ , kernel_size=1 , activation=A__ ) , )
UpperCAmelCase_: Optional[int] = ACTaFN[config.hidden_act]
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Dict = hidden_state
UpperCAmelCase_: Union[str, Any] = self.layer(A__ )
UpperCAmelCase_: Optional[int] = self.shortcut(A__ )
hidden_state += residual
UpperCAmelCase_: List[str] = self.activation(A__ )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ , A__ , A__ , A__ = 2 , A__ = 2 , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: Union[str, Any] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
UpperCAmelCase_: str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
A__ , A__ , A__ , stride=A__ , ) , *[layer(A__ , A__ , A__ ) for _ in range(depth - 1 )] , )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.layers(A__ )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , A__ ):
"""simple docstring"""
super().__init__()
UpperCAmelCase_: Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
A__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCAmelCase_: Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(A__ , config.depths[1:] ):
self.stages.append(RegNetStage(A__ , A__ , A__ , depth=A__ ) )
def snake_case_ ( self , A__ , A__ = False , A__ = True ):
"""simple docstring"""
UpperCAmelCase_: Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase_: Dict = hidden_states + (hidden_state,)
UpperCAmelCase_: Optional[int] = stage_module(A__ )
if output_hidden_states:
UpperCAmelCase_: str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=A__ , hidden_states=A__ )
class UpperCAmelCase__ ( snake_case__ ):
snake_case_ = RegNetConfig
snake_case_ = '''regnet'''
snake_case_ = '''pixel_values'''
snake_case_ = True
def snake_case_ ( self , A__ ):
"""simple docstring"""
if isinstance(A__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(A__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case_ ( self , A__ , A__=False ):
"""simple docstring"""
if isinstance(A__ , A__ ):
UpperCAmelCase_: int = value
_lowerCAmelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCAmelCase__ ( snake_case__ ):
def __init__( self , A__ ):
"""simple docstring"""
super().__init__(A__ )
UpperCAmelCase_: Any = config
UpperCAmelCase_: str = RegNetEmbeddings(A__ )
UpperCAmelCase_: Any = RegNetEncoder(A__ )
UpperCAmelCase_: Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_ ( self , A__ , A__ = None , A__ = None ):
"""simple docstring"""
UpperCAmelCase_: Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_: Tuple = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_: str = self.embedder(A__ )
UpperCAmelCase_: Optional[int] = self.encoder(
A__ , output_hidden_states=A__ , return_dict=A__ )
UpperCAmelCase_: Optional[Any] = encoder_outputs[0]
UpperCAmelCase_: Any = self.pooler(A__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A__ , pooler_output=A__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCAmelCase__ ( snake_case__ ):
def __init__( self , A__ ):
"""simple docstring"""
super().__init__(A__ )
UpperCAmelCase_: str = config.num_labels
UpperCAmelCase_: Optional[int] = RegNetModel(A__ )
# classification head
UpperCAmelCase_: Union[str, Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_ ( self , A__ = None , A__ = None , A__ = None , A__ = None , ):
"""simple docstring"""
UpperCAmelCase_: int = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_: int = self.regnet(A__ , output_hidden_states=A__ , return_dict=A__ )
UpperCAmelCase_: Any = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase_: List[str] = self.classifier(A__ )
UpperCAmelCase_: int = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase_: Any = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase_: str = "single_label_classification"
else:
UpperCAmelCase_: Optional[Any] = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCAmelCase_: List[Any] = MSELoss()
if self.num_labels == 1:
UpperCAmelCase_: Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase_: Optional[int] = loss_fct(A__ , A__ )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase_: Optional[Any] = CrossEntropyLoss()
UpperCAmelCase_: Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase_: Tuple = BCEWithLogitsLoss()
UpperCAmelCase_: int = loss_fct(A__ , A__ )
if not return_dict:
UpperCAmelCase_: Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A__ , logits=A__ , hidden_states=outputs.hidden_states )
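# Usage sketch for the classification model defined above, via the public transformers
# API names for these classes (RegNetForImageClassification); the checkpoint is taken
# from the docstring constants and its availability is assumed:
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"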
| 137
|
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
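# What the suggested import provides: `find_executable_batch_size` retries its wrapped
# function with a halved batch size whenever it raises an out-of-memory error. Typical
# use (a sketch):
#
#     from accelerate import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def inner_training_loop(batch_size):
#         ...  # build dataloaders with `batch_size` and run the training loop
#
#     inner_training_loop()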
| 137
| 1
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A: Dict = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
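# Every test below follows the same recipe: prepare the objects, `save_state()`, keep
# training, then `load_state()` and verify the model parameters and optimizer state snap
# back to the checkpoint. Minimal sketch of that round-trip (a sketch, not a test):
if __name__ == "__main__":
    set_seed(42)
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    accelerator = Accelerator()
    model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        accelerator.save_state(tmpdir)
        a, b = model.a.item(), model.b.item()
        train(1, model, train_dataloader, optimizer, accelerator)
        accelerator.load_state(tmpdir)  # parameters return to the saved point
        assert (model.a.item(), model.b.item()) == (a, b)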
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Any = DummyModel()
UpperCAmelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Any = dummy_dataloaders()
UpperCAmelCase : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_SCREAMING_SNAKE_CASE , automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase : Tuple = Accelerator(project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Union[str, Any] = DummyModel()
UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : int = dummy_dataloaders()
# Train baseline
UpperCAmelCase : Any = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save initial
UpperCAmelCase : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , """initial""" )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase : Optional[int] = optimizer.state_dict()
UpperCAmelCase : Any = train(3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) : Any = model.a.item(), model.b.item()
UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase : str = DummyModel()
UpperCAmelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : List[str] = dummy_dataloaders()
UpperCAmelCase : Optional[int] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.load_state(_SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = train(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save everything
UpperCAmelCase : Dict = os.path.join(_SCREAMING_SNAKE_CASE , """checkpoint""" )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
# Load everything back in and make sure all states work
accelerator.load_state(_SCREAMING_SNAKE_CASE )
test_rands += train(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) : str = model.a.item(), model.b.item()
UpperCAmelCase : int = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Dict = DummyModel()
UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Any = dummy_dataloaders()
UpperCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase : List[Any] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
((UpperCAmelCase) , (UpperCAmelCase)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase : List[Any] = optimizer.state_dict()
UpperCAmelCase : str = train(3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase : Dict = DummyModel()
UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Tuple = dummy_dataloaders()
UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
((UpperCAmelCase) , (UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = train(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase : int = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([1, 2, 3] )
UpperCAmelCase : int = torch.tensor([2, 3, 4] )
UpperCAmelCase : List[Any] = DummyModel()
UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
UpperCAmelCase : List[str] = Accelerator()
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as ve:
accelerator.register_for_checkpointing(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Optional[int] = DummyModel()
UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase : str = torch.optim.lr_scheduler.StepLR(_SCREAMING_SNAKE_CASE , step_size=1 , gamma=0.99 )
UpperCAmelCase , UpperCAmelCase : str = dummy_dataloaders()
UpperCAmelCase : Any = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
UpperCAmelCase : int = scheduler.state_dict()
train(3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(_SCREAMING_SNAKE_CASE , scheduler.state_dict() )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : str = DummyModel()
UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE , total_limit=2 )
# Train baseline
UpperCAmelCase : List[str] = Accelerator(project_dir=_SCREAMING_SNAKE_CASE , project_config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = accelerator.prepare(_SCREAMING_SNAKE_CASE )
# Save 11 states (with total_limit=2, only the last two should survive):
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = ["""torchrun""", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
A: List[Any] = "/tmp/accelerate/state_checkpointing"
A: Any = DummyModel()
A: Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A: int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A , A: int = dummy_dataloaders()
A: Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A: Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A , A , A , A , A: Tuple = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A , A: Union[str, Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A: int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
A: Union[str, Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
A: str = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
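# A condensed sketch of the checkpointing API exercised by this script
# (illustrative only; `net`, `opt` and "my_project" are placeholder names,
# not part of the original test module):
#
#     from accelerate import Accelerator
#     from accelerate.utils import ProjectConfiguration
#
#     config = ProjectConfiguration(project_dir="my_project", automatic_checkpoint_naming=True)
#     accelerator = Accelerator(project_config=config)
#     net, opt = accelerator.prepare(net, opt)
#     accelerator.save_state()  # -> my_project/checkpoints/checkpoint_0
#     accelerator.load_state("my_project/checkpoints/checkpoint_0")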
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    # Incremental sieve of Eratosthenes: factor_map maps each upcoming
    # composite to one of its prime factors.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
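# A quick sanity check for the generator above (illustrative, not part of the
# original module): the first ten primes come out in order.
#
#     from itertools import islice
#     assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]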
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """simple docstring"""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """simple docstring"""
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
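# Example invocation (illustrative; the script name and the checkpoint/stats
# file names are placeholders for an actual HiFi-GAN checkpoint and its
# stats.npy):
#
#     python convert_hifigan.py \
#         --checkpoint_path generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan \
#         --push_to_hub username/speecht5-hifigan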
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image(self):
"""simple docstring"""
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_cond_unet_upscale(self):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCamelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def dummy_vae(self):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
return CLIPTextModel(lowerCamelCase_ )
def test_stable_diffusion_upscale(self):
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCamelCase = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def test_stable_diffusion_upscale_batch(self):
"""simple docstring"""
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
_UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def test_stable_diffusion_upscale_fp16(self):
"""simple docstring"""
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type="v_prediction" )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCamelCase = Image.fromarray(np.uint8(lowerCamelCase_ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_UpperCamelCase = unet.half()
_UpperCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=3_50 , )
_UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_UpperCamelCase = "A painting of a squirrel eating a burger"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type="np" , ).images
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_upscale_pipeline(self):
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def test_stable_diffusion_upscale_pipeline_fp16(self):
"""simple docstring"""
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCamelCase = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = "a cat sitting on a park bench"
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , output_type="np" , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
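# A small usage sketch (illustrative, not part of the original module): the
# default arguments reproduce the Swin-Tiny layout, and `hidden_size` is
# derived from `embed_dim` and the number of stages.
#
#     config = SwinConfig()
#     assert config.model_type == "swin"
#     assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)  # 768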
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: zero-weight edges go to the front of the deque,
                # unit-weight edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
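# Example (illustrative, not part of the original module): an all-zero-weight
# detour 0 -> 1 -> 2 -> 3 beats the direct weight-1 edge 0 -> 3, so the 0-1
# BFS returns distance 0.
#
#     g = AdjacencyList(4)
#     g.add_edge(0, 1, 0)
#     g.add_edge(0, 3, 1)
#     g.add_edge(1, 2, 0)
#     g.add_edge(2, 3, 0)
#     assert g.get_shortest_path(0, 3) == 0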
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """simple docstring"""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        """simple docstring"""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
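# Typical usage (illustrative; the checkpoint is a real public model, while
# `raw_audio` stands in for any 1-D float array of 16 kHz samples):
#
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = extractor(raw_audio, sampling_rate=16_000, return_tensors="pt")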
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
def _sanitize_parameters(self, **kwargs):
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCamelCase = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCamelCase = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCamelCase = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCamelCase = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCamelCase = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCamelCase = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCamelCase = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCamelCase = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCamelCase = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """simple docstring"""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1):
"""simple docstring"""
UpperCamelCase = load_image(UpperCAmelCase_ )
UpperCamelCase = self.image_processor.size["longest_edge"]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = self.image_processor(images=UpperCAmelCase_ , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase = self.get_inference_context()
with inference_context():
UpperCamelCase = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCamelCase = image_embeddings
UpperCamelCase = grid_points.shape[1]
UpperCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase = input_labels[:, i : i + points_per_batch]
UpperCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
"""simple docstring"""
UpperCamelCase = model_inputs.pop("input_boxes" )
UpperCamelCase = model_inputs.pop("is_last" )
UpperCamelCase = model_inputs.pop("original_sizes" ).tolist()
UpperCamelCase = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCamelCase = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase = model_outputs["pred_masks"]
UpperCamelCase = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
UpperCamelCase = model_outputs["iou_scores"]
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
UpperCamelCase = torch.cat(UpperCAmelCase_ )
UpperCamelCase = torch.cat(UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
UpperCamelCase = {}
if output_rle_mask:
UpperCamelCase = rle_mask
if output_bboxes_mask:
UpperCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
'''simple docstring'''
__lowerCAmelCase : Tuple = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
__lowerCAmelCase : Any = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def run_pipeline_test(self, vqa_pipeline, examples):
'''simple docstring'''
__lowerCAmelCase : Optional[int] = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case )}],
[{"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case )}],
] , )
@require_torch
def test_small_model_pt(self):
'''simple docstring'''
__lowerCAmelCase : int = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
__lowerCAmelCase : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__lowerCAmelCase : Union[str, Any] = """How many cats are there?"""
__lowerCAmelCase : Dict = vqa_pipeline(image=_snake_case , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
_snake_case , [{"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case )}, {"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case )}] )
__lowerCAmelCase : List[Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
_snake_case , [{"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case )}, {"""score""": ANY(_snake_case ), """answer""": ANY(_snake_case )}] )
@slow
@require_torch
def test_large_model_pt(self):
'''simple docstring'''
__lowerCAmelCase : Tuple = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
__lowerCAmelCase : Dict = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__lowerCAmelCase : List[Any] = """How many cats are there?"""
__lowerCAmelCase : Union[str, Any] = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
__lowerCAmelCase : Union[str, Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
__lowerCAmelCase : int = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def test_small_model_tf(self):
'''simple docstring'''
pass
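# The end-user call mirrored by the slow test above (illustrative; "cats.png"
# is a placeholder image path):
#
#     from transformers import pipeline
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="cats.png", question="How many cats are there?", top_k=2)
#     # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]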
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def _a ( _lowerCamelCase ) -> Protein:
"""simple docstring"""
__snake_case : List[str] = R"""(\[[A-Z]+\]\n)"""
__snake_case : List[str] = [tag.strip() for tag in re.split(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0]
__snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
__snake_case : List[str] = ["N", "CA", "C"]
__snake_case : Union[str, Any] = None
__snake_case : Optional[Any] = None
__snake_case : str = None
for g in groups:
if "[PRIMARY]" == g[0]:
__snake_case : Any = g[1][0].strip()
for i in range(len(_lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
__snake_case : Union[str, Any] = """X""" # FIXME: strings are immutable
__snake_case : Optional[Any] = np.array(
[residue_constants.restype_order.get(_lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_lowerCamelCase , g[1][axis].split() ) ) )
__snake_case : Optional[Any] = np.array(_lowerCamelCase )
__snake_case : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
for i, atom in enumerate(_lowerCamelCase ):
__snake_case : Union[str, Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__snake_case : Union[str, Any] = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
__snake_case : List[str] = np.zeros(
(
len(_lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.float32 )
for i, atom in enumerate(_lowerCamelCase ):
__snake_case : Optional[Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowerCamelCase , atom_mask=_lowerCamelCase , aatype=_lowerCamelCase , residue_index=np.arange(len(_lowerCamelCase ) ) , b_factors=_lowerCamelCase , )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> list[str]:
    pdb_headers: list[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents, parents_chain_index) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
"""simple docstring"""
__snake_case : List[str] = []
__snake_case : int = pdb_str.split("""\n""" )
__snake_case : Union[str, Any] = prot.remark
if remark is not None:
out_pdb_lines.append(F'''REMARK {remark}''' )
__snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__snake_case : Union[str, Any] = []
if prot.parents_chain_index is not None:
__snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowerCamelCase ) , [] )
parent_dict[str(_lowerCamelCase )].append(_lowerCamelCase )
__snake_case : Optional[int] = max([int(_lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__snake_case : Tuple = parent_dict.get(str(_lowerCamelCase ) , ["""N/A"""] )
parents_per_chain.append(_lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__snake_case : Dict = [["""N/A"""]]
def make_parent_line(_lowerCamelCase ) -> str:
return F'''PARENT {" ".join(_lowerCamelCase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__snake_case : Any = 0
for i, l in enumerate(_lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowerCamelCase ):
__snake_case : int = parents_per_chain[chain_counter]
else:
__snake_case : int = ["""N/A"""]
out_pdb_lines.append(make_parent_line(_lowerCamelCase ) )
return "\n".join(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = residue_constants.restypes + ["""X"""]
def res_atoa(_lowerCamelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
__snake_case : Union[str, Any] = residue_constants.atom_types
__snake_case : List[str] = []
__snake_case : int = prot.atom_mask
__snake_case : Optional[int] = prot.aatype
__snake_case : List[str] = prot.atom_positions
__snake_case : str = prot.residue_index.astype(np.intaa )
__snake_case : Optional[int] = prot.b_factors
__snake_case : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
__snake_case : Optional[int] = get_pdb_headers(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
pdb_lines.extend(_lowerCamelCase )
__snake_case : Dict = aatype.shape[0]
__snake_case : Optional[int] = 1
__snake_case : List[str] = 0
__snake_case : Optional[Any] = string.ascii_uppercase
__snake_case : Tuple = None
# Add all atom sites.
for i in range(_lowerCamelCase ):
__snake_case : Optional[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__snake_case : int = """ATOM"""
__snake_case : int = atom_name if len(_lowerCamelCase ) == 4 else F''' {atom_name}'''
__snake_case : List[str] = """"""
__snake_case : Optional[Any] = """"""
__snake_case : Any = 1.00
__snake_case : Optional[Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
__snake_case : List[Any] = """"""
__snake_case : Tuple = """A"""
if chain_index is not None:
__snake_case : Optional[int] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__snake_case : Optional[int] = (
F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
F'''{res_name_a:>3} {chain_tag:>1}'''
F'''{residue_index[i]:>4}{insertion_code:>1} '''
F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
F'''{occupancy:>6.2f}{b_factor:>6.2f} '''
F'''{element:>2}{charge:>2}'''
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
__snake_case : List[str] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__snake_case : Optional[Any] = True
__snake_case : Union[str, Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
__snake_case : Optional[Any] = """TER"""
__snake_case : List[str] = (
F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowerCamelCase , _lowerCamelCase ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=_lowerCamelCase , remark=_lowerCamelCase , parents=_lowerCamelCase , parents_chain_index=_lowerCamelCase , )
| 26
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : jnp.ndarray
a : jnp.ndarray
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a : int
a : Tuple[int] =(16, 32, 96, 2_56)
a : jnp.dtype =jnp.floataa
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase : List[Any] = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase : List[Any] = self.block_out_channels[i]
lowerCAmelCase : Optional[int] = self.block_out_channels[i + 1]
lowerCAmelCase : Tuple = nn.Conv(
snake_case__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCAmelCase : List[str] = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCAmelCase : Tuple = blocks
lowerCAmelCase : List[str] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = self.conv_in(snake_case__ )
lowerCAmelCase : Dict = nn.silu(snake_case__ )
for block in self.blocks:
lowerCAmelCase : Any = block(snake_case__ )
lowerCAmelCase : Optional[Any] = nn.silu(snake_case__ )
lowerCAmelCase : Union[str, Any] = self.conv_out(snake_case__ )
return embedding
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ ( nn.Module , lowercase , lowercase ):
"""simple docstring"""
a : int =32
a : int =4
a : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a : Union[bool, Tuple[bool]] =False
a : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
a : int =2
a : Union[int, Tuple[int]] =8
a : Optional[Union[int, Tuple[int]]] =None
a : int =12_80
a : float =0.0
a : bool =False
a : jnp.dtype =jnp.floataa
a : bool =True
a : int =0
a : str ="rgb"
a : Tuple[int] =(16, 32, 96, 2_56)
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase : List[Any] = jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCAmelCase : int = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase : int = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase : Union[str, Any] = jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jax.random.split(snake_case__ )
lowerCAmelCase : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.block_out_channels
lowerCAmelCase : List[str] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase : Union[str, Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase : str = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype )
lowerCAmelCase : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase : Any = self.only_cross_attention
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase : List[str] = []
lowerCAmelCase : str = []
lowerCAmelCase : int = block_out_channels[0]
lowerCAmelCase : str = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase : Dict = output_channel
lowerCAmelCase : Any = block_out_channels[i]
lowerCAmelCase : Tuple = i == len(snake_case__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase : int = FlaxDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case__ )
for _ in range(self.layers_per_block ):
lowerCAmelCase : Optional[int] = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
if not is_final_block:
lowerCAmelCase : Dict = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
lowerCAmelCase : str = down_blocks
lowerCAmelCase : Optional[int] = controlnet_down_blocks
# mid
lowerCAmelCase : Tuple = block_out_channels[-1]
lowerCAmelCase : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase : Tuple = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1.0 , snake_case__ = True , snake_case__ = False , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase : Optional[int] = jnp.flip(snake_case__ , axis=1 )
# 1. time
if not isinstance(snake_case__ , jnp.ndarray ):
lowerCAmelCase : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase : str = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase : Optional[int] = jnp.expand_dims(snake_case__ , 0 )
lowerCAmelCase : Union[str, Any] = self.time_proj(snake_case__ )
lowerCAmelCase : Tuple = self.time_embedding(snake_case__ )
# 2. pre-process
lowerCAmelCase : Tuple = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCAmelCase : Union[str, Any] = self.conv_in(snake_case__ )
lowerCAmelCase : List[Any] = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCAmelCase : Optional[int] = self.controlnet_cond_embedding(snake_case__ )
sample += controlnet_cond
# 3. down
lowerCAmelCase : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
else:
lowerCAmelCase , lowerCAmelCase : int = down_block(snake_case__ , snake_case__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase : Union[str, Any] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
# 5. controlnet blocks
lowerCAmelCase : Optional[int] = ()
for down_block_res_sample, controlnet_block in zip(snake_case__ , self.controlnet_down_blocks ):
lowerCAmelCase : Any = controlnet_block(snake_case__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase : Dict = controlnet_down_block_res_samples
lowerCAmelCase : Optional[Any] = self.controlnet_mid_block(snake_case__ )
# 6. scaling
lowerCAmelCase : List[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case__ , mid_block_res_sample=snake_case__ )
| 645
| 0
|
def __lowercase ( __lowerCAmelCase : Tuple ): # noqa: E741
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = [0] * n
a__ = [False] * n
a__ = [False] * n
def dfs(__lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
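# Tarjan-style DFS: low[v] is the smallest vertex id reachable from v's subtree; the bridge/cycle checks below flag articulation points.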
if parent == root:
out_edge_count += 1
a__ = True
a__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
a__ = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
a__ = True
# AP found via cycle
if at == low[to]:
a__ = True
else:
a__ = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
a__ = 0
a__ = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
a__ = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
snake_case : str = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 657
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__file__ ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657
| 1
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"nielsr/canine-s": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
UpperCamelCase = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
UpperCamelCase = 0
UpperCamelCase = 0xe0_00
UpperCamelCase = 0xe0_01
UpperCamelCase = 0xe0_02
UpperCamelCase = 0xe0_03
UpperCamelCase = 0xe0_04
# Maps special codepoints to human-readable names.
UpperCamelCase = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
UpperCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :Tuple , lowerCamelCase__ :Optional[int]=chr(CLS ) , lowerCamelCase__ :Optional[Any]=chr(SEP ) , lowerCamelCase__ :Optional[Any]=chr(SEP ) , lowerCamelCase__ :Dict=chr(CLS ) , lowerCamelCase__ :List[Any]=chr(PAD ) , lowerCamelCase__ :Dict=chr(MASK ) , lowerCamelCase__ :Union[str, Any]=False , lowerCamelCase__ :int=20_48 , **lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :int = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
UpperCamelCase__ :List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
UpperCamelCase__ :Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
UpperCamelCase__ :Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
UpperCamelCase__ :Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase__ :str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , model_max_length=lowerCamelCase__ , **lowerCamelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCamelCase__ :Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCamelCase__ :str = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCamelCase__ :Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCamelCase__ :Any = UNICODE_VOCAB_SIZE
UpperCamelCase__ :List[str] = len(self._special_codepoints )
@property
def __a ( self :Any ):
return self._unicode_vocab_size
def __a ( self :Optional[int] , lowerCamelCase__ :str ):
return list(lowerCamelCase__ )
def __a ( self :List[str] , lowerCamelCase__ :str ):
try:
return ord(lowerCamelCase__ )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def __a ( self :Any , lowerCamelCase__ :int ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(lowerCamelCase__ )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] ):
return "".join(lowerCamelCase__ )
def __a ( self :Optional[int] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :Union[str, Any] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
UpperCamelCase__ :Dict = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __a ( self :Tuple , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None , lowerCamelCase__ :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Tuple = [1] + ([0] * len(lowerCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(lowerCamelCase__ )) + [1]
return result
def __a ( self :int , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :List[Any] = [self.sep_token_id]
UpperCamelCase__ :Optional[Any] = [self.cls_token_id]
UpperCamelCase__ :Tuple = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ):
return ()
| 45
|
def A ( lowercase__ : int , lowercase__ : int ) -> int:
return int(input_a == input_a == 0 )
def A ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45
| 1
|
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE( snake_case_ : tuple[int, int] , snake_case_ : int ) ->list[tuple[int, int]]:
'''simple docstring'''
_lowercase , _lowercase : List[str] = position
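# All eight L-shaped knight moves relative to the current (y, x).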
_lowercase : Dict = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
_lowercase : Any = []
for position in positions:
_lowercase , _lowercase : Any = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(snake_case_ )
return permissible_positions
def _SCREAMING_SNAKE_CASE( snake_case_ : list[list[int]] ) ->bool:
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def _SCREAMING_SNAKE_CASE( snake_case_ : list[list[int]] , snake_case_ : tuple[int, int] , snake_case_ : int ) ->bool:
'''simple docstring'''
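# Backtracking: write the move number into the square, recurse, and reset it to 0 on failure.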
if is_complete(snake_case_ ):
return True
for position in get_valid_pos(snake_case_ , len(snake_case_ ) ):
_lowercase , _lowercase : List[str] = position
if board[y][x] == 0:
_lowercase : Optional[int] = curr + 1
if open_knight_tour_helper(snake_case_ , snake_case_ , curr + 1 ):
return True
_lowercase : str = 0
return False
def _SCREAMING_SNAKE_CASE( snake_case_ : int ) ->list[list[int]]:
'''simple docstring'''
_lowercase : Dict = [[0 for i in range(snake_case_ )] for j in range(snake_case_ )]
for i in range(snake_case_ ):
for j in range(snake_case_ ):
_lowercase : List[str] = 1
if open_knight_tour_helper(snake_case_ , (i, j) , 1 ):
return board
_lowercase : Optional[int] = 0
_lowercase : str = F"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 411
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _lowerCAmelCase ( __A ):
'''simple docstring'''
def __init__( self : int ) -> int:
'''simple docstring'''
_lowercase : Union[str, Any] = []
def __lowercase ( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any , **UpperCamelCase_ : List[str] ) -> str:
'''simple docstring'''
self.events.append('''on_init_end''' )
def __lowercase ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , **UpperCamelCase_ : int ) -> int:
'''simple docstring'''
self.events.append('''on_train_begin''' )
def __lowercase ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
self.events.append('''on_train_end''' )
def __lowercase ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : str , **UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
self.events.append('''on_epoch_begin''' )
def __lowercase ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : str , **UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
self.events.append('''on_epoch_end''' )
def __lowercase ( self : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Dict ) -> Tuple:
'''simple docstring'''
self.events.append('''on_step_begin''' )
def __lowercase ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , **UpperCamelCase_ : Optional[Any] ) -> int:
'''simple docstring'''
self.events.append('''on_step_end''' )
def __lowercase ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict ) -> Tuple:
'''simple docstring'''
self.events.append('''on_evaluate''' )
def __lowercase ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.events.append('''on_predict''' )
def __lowercase ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.events.append('''on_save''' )
def __lowercase ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , **UpperCamelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
self.events.append('''on_log''' )
def __lowercase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
self.events.append('''on_prediction_step''' )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowercase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = tempfile.mkdtemp()
def __lowercase ( self : Dict ) -> str:
'''simple docstring'''
shutil.rmtree(self.output_dir )
def __lowercase ( self : Optional[int] , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0 , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Optional[int]=64 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Union[str, Any]=False , **UpperCamelCase_ : str ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = RegressionDataset(length=UpperCamelCase_ )
_lowercase : Any = RegressionDataset(length=UpperCamelCase_ )
_lowercase : int = RegressionModelConfig(a=UpperCamelCase_ , b=UpperCamelCase_ )
_lowercase : int = RegressionPreTrainedModel(UpperCamelCase_ )
_lowercase : int = TrainingArguments(self.output_dir , disable_tqdm=UpperCamelCase_ , report_to=[] , **UpperCamelCase_ )
return Trainer(
UpperCamelCase_ , UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , callbacks=UpperCamelCase_ , )
def __lowercase ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
# Order doesn't matter
_lowercase : Tuple = sorted(UpperCamelCase_ , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
_lowercase : Union[str, Any] = sorted(UpperCamelCase_ , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
for cba, cba in zip(UpperCamelCase_ , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(UpperCamelCase_ , cba.__class__ )
elif not isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(cba.__class__ , UpperCamelCase_ )
else:
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __lowercase ( self : List[Any] , UpperCamelCase_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = ['''on_init_end''', '''on_train_begin''']
_lowercase : Optional[int] = 0
_lowercase : int = len(trainer.get_eval_dataloader() )
_lowercase : List[Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(UpperCamelCase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __lowercase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_trainer()
_lowercase : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
# Callbacks passed at init are added to the default callbacks
_lowercase : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
_lowercase : Optional[Any] = self.get_trainer(disable_tqdm=UpperCamelCase_ )
_lowercase : Dict = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
def __lowercase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_lowercase : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(UpperCamelCase_ )
expected_callbacks.remove(UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
_lowercase : Dict = self.get_trainer()
_lowercase : Dict = trainer.pop_callback(UpperCamelCase_ )
self.assertEqual(cb.__class__ , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
trainer.add_callback(UpperCamelCase_ )
expected_callbacks.insert(0 , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
# We can also add, pop, or remove by instance
_lowercase : str = self.get_trainer()
_lowercase : str = trainer.callback_handler.callbacks[0]
trainer.remove_callback(UpperCamelCase_ )
expected_callbacks.remove(UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
_lowercase : Tuple = self.get_trainer()
_lowercase : Dict = trainer.callback_handler.callbacks[0]
_lowercase : Optional[int] = trainer.pop_callback(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
trainer.add_callback(UpperCamelCase_ )
expected_callbacks.insert(0 , UpperCamelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase_ )
def __lowercase ( self : Dict ) -> Any:
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=UserWarning )
_lowercase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_lowercase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
# Independent log/save/eval
_lowercase : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_lowercase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
_lowercase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_lowercase : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
_lowercase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
_lowercase : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
_lowercase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
_lowercase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
# A bit of everything
_lowercase : Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
_lowercase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase_ , self.get_expected_events(UpperCamelCase_ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
_lowercase : Union[str, Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(UpperCamelCase_ ) in warn_mock.call_args[0][0]
| 411
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : str = logging.get_logger()
@dataclass
class lowerCAmelCase_ :
__a : nn.Module
__a : List[nn.Module] = field(default_factory=lowerCamelCase_ )
__a : list = field(default_factory=lowerCamelCase_ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Convad ) or isinstance(m ,nn.BatchNormad )
if has_not_submodules:
self.traced.append(m )
def __call__( self ,snake_case__ ):
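# Register a forward hook on every submodule, run the input through the model, then remove all hooks.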
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def snake_case ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class lowerCAmelCase_ :
__a : nn.Module
__a : nn.Module
__a : int = 0
__a : List = field(default_factory=lowerCamelCase_ )
__a : List = field(default_factory=lowerCamelCase_ )
def __call__( self ,snake_case__ ):
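# Trace both models on the same input and copy weights operation by operation.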
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Tracker(self.dest )(snake_case__ ).parametrized
SCREAMING_SNAKE_CASE_ : int = Tracker(self.src )(snake_case__ ).parametrized
SCREAMING_SNAKE_CASE_ : Tuple = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip ,snake_case__ ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip ,snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(snake_case__ )} operations while'
F' destination module has {len(snake_case__ )}.' )
for dest_m, src_m in zip(snake_case__ ,snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : ResNetConfig , lowerCamelCase_ : Path , lowerCamelCase_ : bool = True ) -> List[Any]:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE_ : Tuple = ResNetForImageClassification(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = ModuleTransfer(src=lowerCamelCase_ , dest=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(lowerCamelCase_ )
assert torch.allclose(from_model(lowerCamelCase_ ) , our_model(lowerCamelCase_ ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE_ : Tuple = F'resnet{"-".join(name.split("resnet" ) )}'
print(lowerCamelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
# we can use the convnext one
SCREAMING_SNAKE_CASE_ : Dict = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
print(F'Pushed {checkpoint_name}' )
def __UpperCAmelCase ( lowerCamelCase_ : Path , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = True ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ : Tuple = 10_00
SCREAMING_SNAKE_CASE_ : List[str] = (1, num_labels)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE_ : List[str] = num_labels
SCREAMING_SNAKE_CASE_ : int = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE_ : List[Any] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE_ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Optional[int] = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(lowerCamelCase_ , names_to_config[model_name] , lowerCamelCase_ , lowerCamelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase__ : str = parser.parse_args()
UpperCamelCase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 709
|
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
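# Illustrative sketch (added; not part of the original file). The stub classes
# above follow the "dummy object" pattern: importing them always succeeds, and
# only their first use raises if Flax is missing. A hypothetical, minimal
# version of the `requires_backends` guard they call could look like this:
def _sketch_requires_backends(obj, backends):
    import importlib.util

    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [backend for backend in backends if importlib.util.find_spec(backend) is None]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s), which are not installed.")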
| 685
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self) -> str:
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        # Sentinel head and rear nodes hold no data; real nodes live between them.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable mapping decorated functions to their LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self) -> str:
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , SCREAMING_SNAKE_CASE) -> bool:
return key in self.cache
    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
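# Usage sketch (added; not part of the original module): with the class fixed
# up above, the decorator memoises a one-argument function in a bounded cache.
if __name__ == "__main__":

    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        return num if num < 2 else fib(num - 1) + fib(num - 2)

    print(fib(30))           # 832040, computed with memoisation
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)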
| 88
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
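# Usage sketch (added; not in the original metric file). The wrapper above
# delegates straight to NLTK, so the same score can be reproduced without the
# `datasets` machinery:
if __name__ == "__main__":
    hyp = ["the", "cat", "sat", "on", "the", "mat"]
    ref = ["the", "cat", "is", "on", "the", "mat"]
    print(gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4))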
| 88
| 1
|
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
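# Worked examples (added): factors come back in non-decreasing order, with
# multiplicity, so 360 = 2**3 * 3**2 * 5 yields:
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # a prime is its own single factor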
| 709
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 135
| 0
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 535
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 259
| 0
|
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) with a truncated Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
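# Quick check (added): with the default 18 series terms the truncation error is
# far below the default rounding of 10 decimal places, so the approximation
# should agree exactly with math.sin after rounding.
if __name__ == "__main__":
    import math

    assert sin(90.0) == round(math.sin(math.radians(90.0)), 10)
    assert sin(30.0) == round(math.sin(math.radians(30.0)), 10)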
| 709
|
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
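# Worked example (added): 13 has exactly one representation as a sum of
# distinct squares (2**2 + 3**2), so the solver should report a single solution.
assert solve(13, 2) == 1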
| 143
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 368
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs
    def generate_dummy_inputs(
        self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional["TensorType"] = None, sampling_rate: int = 22050,
        time_duration: float = 5.0, frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size,
            framework=framework, sampling_rate=sampling_rate, time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
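# Usage sketch (added; assumes the surrounding `transformers` package context):
# instantiating the config with no arguments reproduces the defaults encoded in
# the keyword arguments above.
if __name__ == "__main__":
    config = WhisperConfig()
    print(config.d_model, config.encoder_layers)  # 256 6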
| 416
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the handler for the pressed character, if one exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuilds the class through the KeyHandler metaclass."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
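# Usage sketch (added; illustrative only, since `get_character` blocks on real
# terminal input). `mark` tags a method with the keys it handles, and
# `register` rebuilds the class through the KeyHandler metaclass so the tags
# are collected into `key_handler`:
#
#     @register
#     class Menu:
#         @mark("j")
#         def move_down(cls):
#             ...
#
# After registration, calling the injected `handle_input` dispatches a pressed
# "j" to `move_down`.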
| 416
| 1
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
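# Usage sketch (added): resolve whichever backend is installed and dispatch
# through it, mirroring how Trainer.hyperparameter_search picks its default.
if __name__ == "__main__":
    backend_name = default_hp_search_backend()  # raises if none is installed
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
    backend.ensure_available()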
| 433
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word; the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
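# Worked example (added): get_pairs extracts the adjacent-symbol bigrams that
# drive the BPE merge loop above.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}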
| 433
| 1
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 649
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
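# Usage sketch (added): once wired into the transformers CLI, this subcommand
# is invoked as, for example,
#
#     transformers-cli download bert-base-uncased --cache-dir /tmp/models
#
# which simply pre-populates the local cache via the AutoModel/AutoTokenizer
# `from_pretrained` calls in `run`.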
| 649
| 1
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort."""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ :Any = input('Enter numbers separated by comma:\n')
a_ :Any = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 35
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def predict_prob(x):
    return sigmoid_function(
        np.dot(x, theta)
    )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 35
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestDocSamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in `directory` that passes the identifier filters."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_doc(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doc(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doc(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doc(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 662
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 662
| 1
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
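
# A back-of-the-envelope restatement (not the transformers implementation) of
# the "linear" RoPE scaling exercised in test_model_rope_scaling above:
# position indices are divided by the scaling factor before the rotary
# frequencies are applied, which stretches the usable context window.
import torch

def linearly_scaled_positions(seq_len: int, factor: float = 10.0) -> torch.Tensor:
    # positions 0..seq_len-1 compressed into 0..(seq_len-1)/factor
    return torch.arange(seq_len, dtype=torch.float32) / factor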
| 43
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
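
# A minimal sketch of the replicate/shard data-parallel pattern used in both
# tests above (the helper name is ours): weights are copied to every device
# while the batch is split along its leading axis, one slice per device.
# Note that `shard` requires the leading dimension to be divisible by
# jax.device_count().
from flax.jax_utils import replicate
from flax.training.common_utils import shard

def prepare_for_pmap(params, batch):
    replicated_params = replicate(params)  # same weights on every device
    sharded_batch = shard(batch)           # leading dim split across devices
    return replicated_params, sharded_batch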
| 670
| 0
|
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
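
# A quick sanity check (hypothetical inputs) of the filter above:
#
#     >>> import re
#     >>> regex = re.compile(r"^(src|tests).*?\.py$")
#     >>> [x for x in ["src/a.py", "docs/b.md", "tests/c.py"] if regex.match(x)]
#     ['src/a.py', 'tests/c.py']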
| 493
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
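
# For context (the flag name is taken from the code above): running the suite
# with `pytest --make-reports=<id>` routes the terminal summary through
# pytest_terminal_summary_main, which writes per-run report files instead of
# only printing to the console.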
| 493
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Optional[Any] = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
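
# A minimal usage sketch (our own example values; the dataclass is normally
# consumed through transformers' HfArgumentParser): defaults come from the
# field definitions above, and model_names raises unless at least one model
# identifier is supplied.
if __name__ == "__main__":
    args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[1], sequence_lengths=[8])
    print(args.to_json_string())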
| 380
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A__: Union[str, Any] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
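
# A small illustration (pin value hypothetical) of the check performed for each
# runtime dependency: require_version parses a pip-style specifier and raises
# when the installed version does not satisfy it, e.g. dep_version_check("tqdm")
# effectively runs require_version("tqdm>=4.27", hint=None) with the specifier
# taken from dependency_versions_table.py.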
| 380
| 1
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
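
# A generic restatement (framework details and attention masks elided) of the
# KV-cache consistency check in create_and_check_decoder_model_past_large_inputs
# above: decoding only the new tokens against cached past_key_values must
# reproduce a from-scratch forward pass over the full sequence.
import torch

def cache_matches_full_forward(model, input_ids, next_tokens, atol=1e-3):
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    full_hidden = model(full_ids, output_hidden_states=True).hidden_states[0]
    past = model(input_ids, use_cache=True).past_key_values
    cached_hidden = model(next_tokens, past_key_values=past, output_hidden_states=True).hidden_states[0]
    return torch.allclose(full_hidden[:, -next_tokens.shape[1]:], cached_hidden, atol=atol)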
| 701
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
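
# Illustration of why quote() matters in the assertion above: spaces in a file
# name must be percent-encoded in the resolved URL.
#
#     >>> from urllib.parse import quote
#     >>> quote("filename with blanks.csv")
#     'filename%20with%20blanks.csv'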
| 301
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 2_56
    crop_size = 2_24
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"])
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 10_00)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''')
    processor.save_pretrained(pytorch_dump_path)
    print(f'''Processor successfully saved at {pytorch_dump_path}''')

    if push_to_hub:
        print('''Pushing model to the hub...''')
        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
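
# Example invocation (paths hypothetical), matching the arguments defined above:
#
#   python convert_efficientformer_checkpoint.py \
#     --pytorch_model_path ./efficientformer_l1_300d.pth \
#     --config_file ./efficientformer_l1_config.json \
#     --pytorch_dump_path ./efficientformer-l1-300 \
#     --no-push_to_hub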
| 348
|
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Any , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : Union[str, Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Optional[Any] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : Tuple , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : str , *lowerCamelCase__ : Any , **lowerCamelCase__ : Union[str, Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : str , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : List[str] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : str ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : str , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : str , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Optional[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : Tuple , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : str , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Any ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : int , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : str ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : List[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Tuple , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : Dict , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : List[str] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : List[str] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : List[str] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Any , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : str , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : List[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : List[str] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=snake_case ):
_lowerCAmelCase = ["flax"]
def __init__( self : int , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ):
requires_backends(cls , ['''flax'''] )
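
# For reference, a minimal de-obfuscated sketch of the placeholder pattern that
# every class above follows (the class name here is hypothetical; the real file
# enumerates concrete Flax pipeline/model names): any instantiation or
# from_config / from_pretrained call fails fast with an informative
# "flax is required" error raised by requires_backends.
class FlaxPlaceholderExample(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])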
| 348
| 1
|
'''simple docstring'''
from manim import *
class CheckpointLoadingAnimation(Scene):
    def construct(self):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.25 , width=0.25 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = VGroup(_a , _a ).arrange(_a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_a = []
_a = []
_a = []
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_a = []
_a = []
for i, rect in enumerate(_a ):
_a = fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_a = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_a = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_a = [meta_mem.copy() for i in range(6 )]
_a = [meta_mem.copy() for i in range(6 )]
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = VGroup(*_a ).arrange(_a , buff=0 )
_a = VGroup(_a , _a ).arrange(_a , buff=0 )
_a = Text("Disk" , font_size=24 )
_a = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_a = []
for i, rect in enumerate(_a ):
_a = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_a = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait()
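# A minimal render sketch (assumption: manim CE is installed and this file is
# saved as e.g. `stage_anim.py`; the scene name matches the class fixed above):
#
#   manim -pql stage_anim.py CheckpointOffload
#
# `-pql` previews the render at low quality, which is enough to check layout.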
| 710
|
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: returns 1 when both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
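# Illustrative sketch (not from the original module): XNOR can also be built
# from the primitive operators, which makes the truth table above easy to verify.
def xnor_from_primitives(input_1: int, input_2: int) -> int:
    """XNOR(a, b) == NOT(XOR(a, b)), using only int/bool operators."""
    xor = (input_1 or input_2) and not (input_1 and input_2)
    return 0 if xor else 1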
| 521
| 0
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stand-in so that `Image.open` exists when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
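# Outside the test harness, the same pipeline is used like this (sketch; the
# checkpoint and image path are the ones exercised by the tests above):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   preds = vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#               question="How many cats are there?", top_k=2)
#   # preds is a list of {"score": float, "answer": str} dicts, best first.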
| 74
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
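# Construction sketch (assuming the class is exported as `BitConfig`, as in
# the transformers library this file mirrors):
#
#   config = BitConfig(layer_type="bottleneck", out_features=["stage1", "stage4"])
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#
# Passing an unknown `layer_type` or `global_padding` raises a ValueError, as
# guarded in `__init__` above.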
| 94
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
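# Sketch of how the ONNX config above is consumed (illustrative; the export
# machinery reads `inputs` to set dynamic axes per input name):
#
#   onnx_config = RobertaPreLayerNormOnnxConfig(config, task="default")
#   onnx_config.inputs
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])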
| 713
|
from __future__ import annotations
class A__ :
def __init__( self , lowerCamelCase ) -> None:
"""simple docstring"""
__magic_name__ : List[str] = data
__magic_name__ : Node | None = None
__magic_name__ : Node | None = None
def lowerCAmelCase ( UpperCAmelCase ) ->None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCAmelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def lowerCAmelCase ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCAmelCase ( ) ->None: # Main function for testing.
"""simple docstring"""
__magic_name__ : Tuple = Node(1 )
__magic_name__ : Union[str, Any] = Node(2 )
__magic_name__ : Tuple = Node(3 )
__magic_name__ : List[str] = Node(4 )
__magic_name__ : str = Node(5 )
__magic_name__ : List[Any] = Node(6 )
__magic_name__ : Optional[int] = Node(7 )
__magic_name__ : str = Node(8 )
__magic_name__ : str = Node(9 )
print(is_full_binary_tree(UpperCAmelCase ) )
print(depth_of_tree(UpperCAmelCase ) )
print('''Tree is: ''' )
display(UpperCAmelCase )
if __name__ == "__main__":
main()
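# A quick negative check (sketch): dropping one leaf breaks fullness, since a
# full binary tree requires every node to have exactly 0 or 2 children.
#
#   root = Node(1)
#   root.left = Node(2)        # only one child
#   is_full_binary_tree(root)  # -> False
#   depth_of_tree(root)        # -> 2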
| 336
| 0
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of all tensors in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index over `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produce an ordered sequence of slice tuples that, chained together, cover
    exactly the (inclusive) range [start, end] over a tensor of shape `dims`.
    """

    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flat range [flat_start, flat_end) out of the leading batch dims of t."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Apply `layer` to slices of the flattened leading batch dimensions of its
    inputs, `chunk_size` rows at a time, writing per-chunk outputs into a
    pre-allocated output tree to cap peak memory.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs the model has been run on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
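# Minimal usage sketch for `chunk_layer` (illustrative values; any callable
# taking keyword tensors that share leading batch dims works):
#
#   layer = lambda x: {"out": x * 2}                 # toy "layer"
#   inputs = {"x": torch.randn(4, 128, 16)}          # batch dims (4, 128)
#   out = chunk_layer(layer, inputs, chunk_size=64, no_batch_dims=2)
#   # out["out"].shape == (4, 128, 16), computed 64 flat rows at a time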
| 15
|
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
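# Usage sketch: keys produced by `flatten_dict` are tuples of path components,
# so a rule like ("mlp", "c_fc", "kernel") matches any parameter whose path
# contains that window. For example (hypothetical parameter path):
#
#   _match(("mlp", "c_fc", "kernel"),
#          ("transformer", "h", "0", "mlp", "c_fc", "kernel"))  # -> True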
| 652
| 0
|
def solution(min_total: int = 10**12) -> int:
    """
    Return the number of blue discs in the first arrangement with more than
    `min_total` discs in total for which P(two blue) is exactly 1/2
    (Project Euler problem 100), using the Pell-style recurrence below.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
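# Sanity sketch: the known arrangement of 85 blue and 35 red discs (120 total)
# gives P(two blue) = 85/120 * 84/119 = 1/2, and the recurrence reproduces it
# for any requested total beyond the first arrangement of 21 discs:
#
#   solution(22)  # -> 85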
| 721
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 679
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
__magic_name__ = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
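# Usage sketch (assumption: run inside transformers with the tools/agents
# extra installed; the keys of LANGUAGE_CODES above are the accepted names):
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#   # -> something like "Hello, how are you?"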
| 155
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 205
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowercase_ = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
| 650
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
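# The deferred-import behaviour above can be sketched with PEP 562's
# module-level __getattr__ (illustrative only; `_LazyModule` additionally
# caches resolved attributes and raises nicer errors):
#
#   import importlib
#
#   def __getattr__(name):
#       if name in _import_structure.get("modeling_trocr", []):
#           return getattr(importlib.import_module(".modeling_trocr", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")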
| 650
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_altclip': [
        'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AltCLIPConfig',
        'AltCLIPTextConfig',
        'AltCLIPVisionConfig',
    ],
    'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 566
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__( snake_case_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="None" , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = relative_attention
__lowercase = position_biased_input
__lowercase = pos_att_type
__lowercase = scope
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_config()
__lowercase = 3_0_0
return config
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = DebertaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0]
__lowercase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0]
__lowercase = model(__UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = DebertaForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DebertaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCAmelCase )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DebertaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = DebertaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowercase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Tuple = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase : str = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : List[Any] = True
UpperCamelCase : Optional[Any] = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Tuple = False
UpperCamelCase : List[str] = False
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = DebertaModelTester(self )
__lowercase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def __magic_name__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DebertaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
__lowercase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
# compare the actual values for a slice.
__lowercase = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 566
| 1
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each task prompt n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generations in the batch are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the generated code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple candidate completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def lowerCamelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser(__snake_case )
_UpperCamelCase = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCamelCase = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_UpperCamelCase = '''false'''
if args.num_workers is None:
_UpperCamelCase = multiprocessing.cpu_count()
    # Load the dataset and feed it to Accelerate via a dataloader
_UpperCamelCase = Accelerator()
set_seed(args.seed, device_specific=__snake_case )
# Load model and tokenizer
_UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCamelCase = tokenizer.eos_token
_UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCamelCase = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, __snake_case, __snake_case )] ),
}
# Load evaluation dataset and metric
_UpperCamelCase = load_dataset('''openai_humaneval''' )
_UpperCamelCase = load_metric('''code_eval''' )
_UpperCamelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
_UpperCamelCase = args.n_samples // args.batch_size
_UpperCamelCase = TokenizedDataset(__snake_case, human_eval['''test'''], n_copies=__snake_case, n_tasks=__snake_case )
    # do not confuse this with args.batch_size, which is actually used as num_return_sequences per prompt
_UpperCamelCase = DataLoader(__snake_case, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCamelCase = code_eval_metric.compute(references=[''''''], predictions=[['''''']] )
except ValueError as exception:
print(
            '''Code evaluation not enabled. Read the warning below carefully and then use the `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
_UpperCamelCase , _UpperCamelCase = accelerator.prepare(__snake_case, __snake_case )
_UpperCamelCase = complete_code(
__snake_case, __snake_case, __snake_case, __snake_case, n_tasks=__snake_case, batch_size=args.batch_size, **__snake_case, )
if accelerator.is_main_process:
_UpperCamelCase = []
for task in tqdm(range(__snake_case ) ):
_UpperCamelCase = human_eval['''test'''][task]['''test''']
_UpperCamelCase = F'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCamelCase , _UpperCamelCase = code_eval_metric.compute(
references=__snake_case, predictions=__snake_case, num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file, '''w''' ) as fp:
json.dump(__snake_case, __snake_case )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
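# A hypothetical invocation (a sketch only; the script name is a placeholder and
# the flags are assumed from the HumanEvalArguments fields referenced above):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot \
#       --do_sample --temperature 0.2 --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json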
| 78
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=None , __a=True , ) -> int:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 20}
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_flip_channel_order
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = MobileViTImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = MobileViTImageProcessingTester(self)
@property
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a , '''do_resize'''))
self.assertTrue(hasattr(__a , '''size'''))
self.assertTrue(hasattr(__a , '''do_center_crop'''))
self.assertTrue(hasattr(__a , '''center_crop'''))
self.assertTrue(hasattr(__a , '''do_flip_channel_order'''))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a , Image.Image)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor)
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 78
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a_ ( UpperCAmelCase__ ):
lowercase_ : Any = '''van'''
def __init__( self : Dict , __lowerCAmelCase : Any=2_2_4 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : List[Any]=[7, 3, 3, 3] , __lowerCAmelCase : Tuple=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 1_2, 3] , __lowerCAmelCase : str=[8, 8, 4, 4] , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]=1E-6 , __lowerCAmelCase : int=1E-2 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
__snake_case = image_size
__snake_case = num_channels
__snake_case = patch_sizes
__snake_case = strides
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = mlp_ratios
__snake_case = hidden_act
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = layer_scale_init_value
__snake_case = drop_path_rate
__snake_case = dropout_rate
| 356
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class a_ :
def __init__( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any]=1_3 , __lowerCAmelCase : int=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=9_9 , __lowerCAmelCase : List[str]=3_2 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=True , __lowerCAmelCase : Tuple="None" , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = relative_attention
__snake_case = position_biased_input
__snake_case = pos_att_type
__snake_case = scope
def lowercase__ ( self : Dict ):
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ):
__snake_case = TFDebertaVaModel(config=__lowerCAmelCase )
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__snake_case = [input_ids, input_mask]
__snake_case = model(__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
__snake_case = TFDebertaVaForMaskedLM(config=__lowerCAmelCase )
__snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
__snake_case = self.num_labels
__snake_case = TFDebertaVaForSequenceClassification(config=__lowerCAmelCase )
__snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ):
__snake_case = self.num_labels
__snake_case = TFDebertaVaForTokenClassification(config=__lowerCAmelCase )
__snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ):
__snake_case = TFDebertaVaForQuestionAnswering(config=__lowerCAmelCase )
__snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
__snake_case = self.prepare_config_and_inputs()
        (
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
        ) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowercase_ : List[str] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase_ : Union[str, Any] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase_ : int = False
lowercase_ : List[Any] = False
def lowercase__ ( self : Any ):
__snake_case = TFDebertaVaModelTester(self )
__snake_case = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowercase__ ( self : Union[str, Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def lowercase__ ( self : List[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def lowercase__ ( self : Any ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def lowercase__ ( self : Dict ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def lowercase__ ( self : List[str] ):
__snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a_ ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def lowercase__ ( self : List[str] ):
pass
@slow
def lowercase__ ( self : int ):
__snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
__snake_case = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
__snake_case = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 )
| 356
| 1
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
A = HfApi()
A = {}
# fmt: off
A = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
A = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
A = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
A = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
A = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
A = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
A = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
A = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
A = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
A = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
A = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
A = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
A = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
A = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
A = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
A = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
A = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
A = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
A = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
A = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
A = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
A = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 707
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __a :
'''simple docstring'''
UpperCAmelCase__ : List[str]
UpperCAmelCase__ : Optional[str] = None
# Automatically constructed
UpperCAmelCase__ : ClassVar[str] = "dict"
UpperCAmelCase__ : ClassVar[Any] = None
UpperCAmelCase__ : str = field(default="""Translation""" , init=__A , repr=__A )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __snake_case ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __a :
'''simple docstring'''
UpperCAmelCase__ : Optional[List] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[str] = None
# Automatically constructed
UpperCAmelCase__ : ClassVar[str] = "dict"
UpperCAmelCase__ : ClassVar[Any] = None
UpperCAmelCase__ : str = field(default="""TranslationVariableLanguages""" , init=__A , repr=__A )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = sorted(set(self.languages ) ) if self.languages else None
SCREAMING_SNAKE_CASE_ : Dict = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def __snake_case ( self , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = set(self.languages )
if self.languages and set(UpperCamelCase__ ) - lang_set:
raise ValueError(
F'''Some languages in example ({', '.join(sorted(set(UpperCamelCase__ ) - lang_set ) )}) are not in valid set ({', '.join(UpperCamelCase__ )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
SCREAMING_SNAKE_CASE_ : List[str] = []
for lang, text in translation_dict.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = zip(*sorted(UpperCamelCase__ ) )
return {"language": languages, "translation": translations}
def __snake_case ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
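def _flatten_translations_demo(translation_dict):
    # A standalone sketch (not part of the original file) of the flattening step
    # in encode_example above, using only plain Python: languages with multiple
    # translations are split into (lang, text) pairs and re-sorted by language.
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

# e.g. _flatten_translations_demo({"en": "the cat", "fr": ["le chat", "un chat"]})
# -> {"language": ["en", "fr", "fr"], "translation": ["the cat", "le chat", "un chat"]}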
| 97
| 0
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase__ = spec.loader.load_module()
UpperCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase__ = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
UpperCAmelCase__ = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def _UpperCAmelCase ( ) -> Dict:
_snake_case = []
for config_class in list(CONFIG_MAPPING.values() ):
_snake_case = False
# source code of `config_class`
_snake_case = inspect.getsource(_a )
_snake_case = _re_checkpoint.findall(_a )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_snake_case = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_snake_case = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
_snake_case = True
break
_snake_case = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_a )
if len(_a ) > 0:
_snake_case = '''\n'''.join(sorted(_a ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
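def _checkpoint_regex_demo():
    # A self-contained sketch (not in the original script; the pattern is copied
    # from the module-level regex above): markdown-style checkpoint references
    # come back from findall as (checkpoint name, checkpoint link) tuples.
    pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
    sample = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert pattern.findall(sample) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]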
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 224
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
SCREAMING_SNAKE_CASE : Any = False
try:
SCREAMING_SNAKE_CASE : List[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCamelCase_ = None , UpperCamelCase_ = [] ):
lowercase_ :str = 0
lowercase_ :str = choices
lowercase_ :List[str] = prompt
if sys.platform == "win32":
lowercase_ :List[Any] = '''*'''
else:
lowercase_ :str = '''➔ '''
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase_ )
else:
forceWrite(self.choices[index] , UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ ):
if index == self.position:
forceWrite(f" {self.arrow_char} " )
self.write_choice(UpperCamelCase_ )
else:
forceWrite(f" {self.choices[index]}" )
reset_cursor()
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = 1 ):
lowercase_ :Optional[Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase_ )
move_cursor(UpperCamelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def UpperCamelCase ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def UpperCamelCase ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def UpperCamelCase ( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def UpperCamelCase ( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(UpperCamelCase_ )] for number in range(10 )] )
def UpperCamelCase ( self ):
lowercase_ :int = int(chr(self.current_selection ) )
lowercase_ :Optional[Any] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCamelCase_ )
else:
return
else:
return
def UpperCamelCase ( self , UpperCamelCase_ = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
lowercase_ :str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase_ )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
lowercase_ :Optional[Any] = int(builtins.input() )
except ValueError:
lowercase_ :List[Any] = default_choice
else:
lowercase_ :List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(UpperCamelCase_ , '''\n''' )
return choice
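# A hypothetical usage sketch (illustrative only; `UpperCamelCase` is this
# file's obfuscated menu class, its obfuscated parameter names collide, so the
# call mirrors the original BulletMenu API as an assumption, and run() needs an
# interactive terminal):
#   menu = UpperCamelCase("Pick a compute backend:", ["cpu", "cuda", "mps"])
#   choice_index = menu.run(default_choice=1)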
| 257
| 0
|
from ...configuration_utils import PretrainedConfig
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = "bert-generation"
def __init__( self : Dict , snake_case : Any=50358 , snake_case : Union[str, Any]=1024 , snake_case : str=24 , snake_case : int=16 , snake_case : List[str]=4096 , snake_case : Optional[Any]="gelu" , snake_case : Optional[int]=0.1 , snake_case : int=0.1 , snake_case : Union[str, Any]=512 , snake_case : List[str]=0.02 , snake_case : List[Any]=1E-12 , snake_case : Union[str, Any]=0 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=1 , snake_case : List[Any]="absolute" , snake_case : Optional[int]=True , **snake_case : Union[str, Any] , ):
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
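# A minimal usage sketch (an assumption, not in the original file;
# `_lowerCamelCase` is this file's obfuscated name for BertGenerationConfig):
#   config = _lowerCamelCase(vocab_size=50358, hidden_size=1024)
#   assert config.hidden_size % config.num_attention_heads == 0  # 1024 / 16 heads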
| 700
|
import heapq
import sys
import numpy as np
a_ = tuple[int, int]
class _lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ):
__UpperCamelCase = []
__UpperCamelCase = set()
def snake_case ( self : Dict ):
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case ( self : Optional[Any] ):
return len(self.elements ) == 0
def snake_case ( self : Optional[int] , snake_case : Tuple , snake_case : Any ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case )
else:
# update
# print("update", item)
__UpperCamelCase = []
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case ( self : Any , snake_case : int ):
if item in self.set:
self.set.remove(snake_case )
__UpperCamelCase = []
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case ( self : Tuple ):
return self.elements[0][1]
def snake_case ( self : List[str] ):
((__UpperCamelCase) , (__UpperCamelCase)) = heapq.heappop(self.elements )
self.set.remove(snake_case )
return (priority, item)
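# A hedged trace of the queue above (illustrative only; the obfuscated method
# names collide, so this follows the original PriorityQueue semantics as an
# assumption): after put((1, 1), 5) and put((2, 2), 3), top_show() returns
# (2, 2); calling put((1, 1), 1) re-prioritizes (1, 1) to the front, so the
# next get() yields (1, (1, 1)).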
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = np.array(lowercase_ )
__UpperCamelCase = np.array(lowercase_ )
return np.linalg.norm(a - b )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
"""simple docstring"""
return consistent_heuristic(lowercase_ , lowercase_ ) // t
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
__UpperCamelCase = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ )
return ans
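# A worked sketch of the priority key above (numbers are illustrative
# assumptions): with g_function[start] = 5, Wa = 1, and the Manhattan heuristic
# defined above, a node at (2, 3) with goal (19, 19) gets
# key = 5 + 1 * (17 + 16) = 38.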
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
__UpperCamelCase = np.chararray((n, n) )
for i in range(lowercase_ ):
for j in range(lowercase_ ):
__UpperCamelCase = '''*'''
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if (j, (n - 1) - i) in blocks:
__UpperCamelCase = '''#'''
__UpperCamelCase = '''-'''
__UpperCamelCase = back_pointer[goal]
while x != start:
((__UpperCamelCase) , (__UpperCamelCase)) = x
# print(x)
__UpperCamelCase = '''-'''
__UpperCamelCase = back_pointer[x]
__UpperCamelCase = '''-'''
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCamelCase = back_pointer[goal]
while x != start:
print(lowercase_ , end=''' ''' )
__UpperCamelCase = back_pointer[x]
print(lowercase_ )
sys.exit()
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
"""simple docstring"""
for itera in range(lowercase_ ):
open_list[itera].remove_element(lowercase_ )
# print("s", s)
# print("j", j)
((__UpperCamelCase) , (__UpperCamelCase)) = s
__UpperCamelCase = (x - 1, y)
__UpperCamelCase = (x + 1, y)
__UpperCamelCase = (x, y + 1)
__UpperCamelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowercase_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowercase_ )
__UpperCamelCase = -1
__UpperCamelCase = float('''inf''' )
if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCamelCase = g_function[s] + 1
__UpperCamelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowercase_ ):
if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key(
lowercase_ , 0 , lowercase_ , lowercase_ ):
open_list[j].put(
lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
__UpperCamelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ = make_common_ground()
a_ = blocks_blk
# hyper parameters
a_ = 1
a_ = 1
a_ = 20
a_ = 3 # one consistent and two other inconsistent
# start and end destination
a_ = (0, 0)
a_ = (n - 1, n - 1)
a_ = 1
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
__UpperCamelCase = {start: 0, goal: float('''inf''' )}
__UpperCamelCase = {start: -1, goal: -1}
__UpperCamelCase = []
__UpperCamelCase = set()
for i in range(lowercase_ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) )
__UpperCamelCase = []
__UpperCamelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowercase_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowercase_ , lowercase_ , lowercase_ )
else:
__UpperCamelCase , __UpperCamelCase = open_list[i].top_show()
visited.add(lowercase_ )
expand_state(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
close_list_inad.append(lowercase_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowercase_ , lowercase_ , lowercase_ )
else:
__UpperCamelCase = open_list[0].top_show()
visited.add(lowercase_ )
expand_state(
lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
close_list_anchor.append(lowercase_ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowercase_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 375
| 0
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowerCAmelCase = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , UpperCamelCase_ )
if matches:
__lowerCAmelCase = float(matches[1] )
__lowerCAmelCase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowerCAmelCase = 1_0_0_1
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(UpperCamelCase_ ) + 1: v for k, v in idalabel.items()}
__lowerCAmelCase = "background"
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( UpperCamelCase: Optional[int] , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any]=False ):
"""simple docstring"""
__lowerCAmelCase = get_mobilenet_va_config(UpperCamelCase_ )
# Load 🤗 model
__lowerCAmelCase = MobileNetVaForImageClassification(UpperCamelCase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowerCAmelCase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCAmelCase = model(**UpperCamelCase_ )
__lowerCAmelCase = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
__lowerCAmelCase = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
__lowerCAmelCase = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
__lowerCAmelCase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
__lowerCAmelCase = "google/" + model_name
image_processor.push_to_hub(UpperCamelCase_ )
model.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
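# A hypothetical invocation (a sketch; the script name, checkpoint path, and
# output path are placeholders):
#   python convert_mobilenet_v1_checkpoint.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf --push_to_hub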
| 611
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowerCAmelCase ( unittest.TestCase , lowercase ):
"""simple docstring"""
def _UpperCAmelCase ( self : List[str] ):
A_ = load_tool("text-classification" )
self.tool.setup()
A_ = load_tool("text-classification" , remote=lowerCAmelCase )
def _UpperCAmelCase ( self : Union[str, Any] ):
A_ = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(lowerCAmelCase , "positive" )
def _UpperCAmelCase ( self : str ):
A_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(lowerCAmelCase , "positive" )
def _UpperCAmelCase ( self : str ):
A_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(lowerCAmelCase , "positive" )
def _UpperCAmelCase ( self : List[Any] ):
A_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(lowerCAmelCase , "positive" )
| 452
| 0
|
"""simple docstring"""
def a__ ( a : int , a : int ):
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def a__ ( ):
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 87
|
"""simple docstring"""
def a__ ( a : list , a : int , a : int = 0 , a : int = 0 ):
"""simple docstring"""
_snake_case : Optional[int] = right or len(a ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a , a , left + 1 , right - 1 )
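# A hedged trace of the double-ended recursive search above (illustrative only;
# the obfuscated parameter names collide, so this follows the original
# signature search(list_data, key, left, right) as an assumption): searching
# [1, 3, 5, 7, 9] for 7 checks index 0 (value 1) and index 4 (value 9),
# recurses with left=1, right=3, matches list_data[3] == 7, and returns 3;
# an absent key shrinks the window until left > right and returns -1.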
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87
| 1
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : Dict = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase : List[Any] ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : List[Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase )
__UpperCAmelCase : List[str] = list(model.children() )[:-2]
__UpperCAmelCase : Optional[Any] = nn.Sequential(*UpperCamelCase )
__UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.pool(self.model(UpperCamelCase ) )
__UpperCAmelCase : Union[str, Any] = torch.flatten(UpperCamelCase , start_dim=2 )
__UpperCAmelCase : List[str] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : str = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )]
__UpperCAmelCase : Optional[Any] = os.path.dirname(UpperCamelCase )
__UpperCAmelCase : Any = tokenizer
__UpperCAmelCase : List[Any] = labels
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
__UpperCAmelCase : List[str] = max_seq_length
__UpperCAmelCase : int = transforms
def __len__( self : int ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple , UpperCamelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = sentence[0], sentence[1:-1], sentence[-1]
__UpperCAmelCase : Dict = sentence[: self.max_seq_length]
__UpperCAmelCase : Tuple = torch.zeros(self.n_classes )
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
__UpperCAmelCase : Any = self.transforms(UpperCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( _UpperCamelCase : str ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [len(row["""sentence"""] ) for row in batch]
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = len(_UpperCamelCase ), max(_UpperCamelCase )
__UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : Tuple = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ):
__UpperCAmelCase : Tuple = input_row["""sentence"""]
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : Tuple = torch.stack([row["""image"""] for row in batch] )
__UpperCAmelCase : List[str] = torch.stack([row["""label"""] for row in batch] )
__UpperCAmelCase : List[Any] = torch.stack([row["""image_start_token"""] for row in batch] )
__UpperCAmelCase : Any = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
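# A shape sketch for the collate function above (batch numbers are illustrative
# assumptions): for a batch of 4 rows whose longest sentence has 12 tokens,
# text_tensor and mask_tensor are LongTensors of shape (4, 12), img_tensor is
# (4, 3, 224, 224) after the transforms defined below, and tgt_tensor is a
# (4, 23) multi-hot vector over the 23 genre labels listed below.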
def lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 139
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
UpperCAmelCase : List[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCAmelCase : List[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
UpperCAmelCase : str = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
UpperCAmelCase : str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
UpperCAmelCase : Optional[int] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 139
| 1
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = IFPipeline
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"}
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def A_ ( self : Union[str, Any], _UpperCAmelCase : Dict, _UpperCAmelCase : Any=0 ) -> List[str]:
"""simple docstring"""
if str(_UpperCAmelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
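# Hedged usage sketch for the helper above (assumes a CUDA device is present;
# the helper name below is hypothetical and the tensor size is an arbitrary
# illustration, not part of the test suite): reset the peak counters, allocate,
# then read back only the post-reset high-water mark.
def _demo_peak_memory_measurement():
    _start_torch_memory_measurement()
    x = torch.empty(1024, 1024, device="cuda")  # ~4 MiB fp32 allocation
    peak_bytes = torch.cuda.max_memory_allocated()  # reflects only allocations made after the reset
    del x
    return peak_bytes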
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
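# Hedged illustration (the helper name is hypothetical, not part of the script):
# the two tables above combine into a hub id plus the tuned length_penalty that
# the conversion below writes into each model's config.json.
def _demo_generation_defaults(model_dir="wmt19-ru-en"):
    hub_id = f"{org_names[model_dir]}/{model_dir}"  # e.g. "facebook/wmt19-ru-en"
    length_penalty = best_score_hparams.get(model_dir, {}).get("length_penalty", 1.0)
    return hub_id, length_penalty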
def rewrite_dict_keys(d):
    # (1) strip the "@@" word-breaking symbol, (2) add a "</w>" word-ending symbol
    # where the word is not broken up, e.g. {'le@@': 5, 'er': 7} => {'le': 5, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
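# A minimal worked example of the key rewrite above (a toy vocab, not a real
# fairseq dictionary; the demo function name is hypothetical): BPE continuation
# markers "@@" are stripped, word-final pieces gain "</w>", and the four special
# tokens are restored verbatim.
def _demo_rewrite_dict_keys():
    toy = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "new@@": 4, "er": 5}
    assert rewrite_dict_keys(toy) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "new": 4, "er</w>": 5}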
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
    assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support tokenizer={args["tokenizer"]}'''
    model_conf = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.0_2,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
# remove unneeded keys
    ignore_keys = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
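# Hedged usage sketch (assumes an installed `diffusers` package; the demo
# function name is hypothetical): `compatibles` resolves the `_compatibles`
# class-name strings to live classes on the top-level diffusers module, so a
# pipeline can swap one scheduler for an interchangeable peer.
def _demo_compatible_schedulers():
    from diffusers import DDIMScheduler

    return [c.__name__ for c in DDIMScheduler().compatibles]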
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
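# A worked example of the pattern-driven renaming above (the key is a made-up
# illustration in the TF checkpoint layout, not taken from a real checkpoint;
# the demo function name is hypothetical). Applying DECODER_PATTERNS in order
# maps '/'->'.', 'layer_'->'layers.', 'kernel'->'weight', 'pegasus'->'model',
# and 'attention.output.dense'->'self_attn.out_proj'.
def _demo_rename_state_dict_key():
    tf_key = "pegasus/decoder/layer_0/attention/output/dense/kernel"
    # -> "model.decoder.layers.0.self_attn.out_proj.weight"
    return rename_state_dict_key(tf_key, DECODER_PATTERNS)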
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
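# Worked example of the bit-level encoding above: b"AB" is 16 bits
# ("01000001" + "01000010"); two zero bits of padding give the 6-bit groups
# 010000, 010100, 001000 -> "QUI", plus one "=" for the (6 - 16 % 6) // 2 = 1
# padding character.
assert base64_encode(b"AB") == b"QUI="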
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
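# Round-trip sanity check tying the two functions together; this mirrors what
# the doctest run below would exercise.
assert base64_decode(base64_encode(b"Algorithms")) == b"Algorithms"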
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
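# Worked example of the normalization chain above: lowercase, strip
# punctuation, drop articles, then collapse whitespace.
assert normalize_answer("The  cat's mat.") == "cats mat"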
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
A__ = [rgram for rgrams in rgramslist for rgram in rgrams]
A__ = Counter(UpperCamelCase )
A__ = Counter(UpperCamelCase )
A__ = Counter()
for sgram, scount in sgramcounter.items():
A__ = scount * numref
A__ = Counter(UpperCamelCase )
A__ = Counter()
for cgram, ccount in cgramcounter.items():
A__ = ccount * numref
# KEEP
A__ = sgramcounter_rep & cgramcounter_rep
A__ = keepgramcounter_rep & rgramcounter
A__ = sgramcounter_rep & rgramcounter
A__ = 0
A__ = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
A__ = 1
A__ = 1
if len(UpperCamelCase ) > 0:
A__ = keeptmpscorea / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
A__ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
A__ = 0
if keepscore_precision > 0 or keepscore_recall > 0:
A__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
A__ = sgramcounter_rep - cgramcounter_rep
A__ = delgramcounter_rep - rgramcounter
A__ = sgramcounter_rep - rgramcounter
A__ = 0
A__ = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
A__ = 1
if len(UpperCamelCase ) > 0:
A__ = deltmpscorea / len(UpperCamelCase )
# ADDITION
A__ = set(UpperCamelCase ) - set(UpperCamelCase )
A__ = set(UpperCamelCase ) & set(UpperCamelCase )
A__ = set(UpperCamelCase ) - set(UpperCamelCase )
A__ = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
A__ = 1
A__ = 1
if len(UpperCamelCase ) > 0:
A__ = addtmpscore / len(UpperCamelCase )
if len(UpperCamelCase ) > 0:
A__ = addtmpscore / len(UpperCamelCase )
A__ = 0
if addscore_precision > 0 or addscore_recall > 0:
A__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
A__ = len(UpperCamelCase )
A__ = ssent.split(""" """ )
A__ = csent.split(""" """ )
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
A__ = []
for rsent in rsents:
A__ = rsent.split(""" """ )
A__ = []
A__ = []
A__ = []
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
A__ = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
A__ = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
A__ = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
ragramslist.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
A__ = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
A__ = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
A__ = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(UpperCamelCase )
for i in range(0 , len(UpperCamelCase ) - 1 ):
if i < len(UpperCamelCase ) - 1:
A__ = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 2:
A__ = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(UpperCamelCase )
if i < len(UpperCamelCase ) - 3:
A__ = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(UpperCamelCase )
((A__) ,(A__) ,(A__)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((A__) ,(A__) ,(A__)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((A__) ,(A__) ,(A__)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
((A__) ,(A__) ,(A__)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
A__ = sum([delascore, delascore, delascore, delascore] ) / 4
A__ = sum([addascore, addascore, addascore, addascore] ) / 4
A__ = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
# Normalization is requried for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
A__ = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
A__ = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
else:
A__ = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
elif tokenizer == "moses":
A__ = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
elif tokenizer == "penn":
A__ = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
else:
A__ = sentence
if not return_str:
A__ = normalized_sent.split()
return normalized_sent
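# Hedged usage sketch of the sentence normalizer above (the expected output in
# the comment reflects sacrebleu's "13a" tokenizer behavior as we understand
# it, not a pinned fixture; the demo function name is hypothetical): "13a"
# tokenization splits punctuation off into separate tokens.
def _demo_normalize():
    return normalize("Hello, world!", lowercase=False)  # expected: "Hello , world !"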
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A__ = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
A__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A__ = {"""unk_token""": """<unk>"""}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase__ ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = """lower newer"""
A__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A__ = tokenizer.tokenize(lowerCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
    def longformer_dict_integration_testing(self):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
    def test_sequence_builders(self):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
A__ = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCamelCase__ )
A__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCamelCase__ )
A__ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
A__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = """Encode this sequence."""
A__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing spaces after special tokens
A__ = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
A__ = """Encode <mask> sequence"""
A__ = """Encode <mask>sequence"""
A__ = tokenizer.encode(lowerCamelCase__ )
A__ = encoded.index(lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ = tokenizer.encode(lowerCamelCase__ )
A__ = encoded.index(lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
    def test_pretokenized_inputs(self):
"""simple docstring"""
pass
    def test_embeded_special_tokens(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
A__ = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
A__ = """A, <mask> AllenNLP sentence."""
A__ = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
A__ = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCamelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowerCamelCase__ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , lowerCamelCase__ )
self.assertEqual(post_processor_state["""trim_offsets"""] , lowerCamelCase__ )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
"""simple docstring"""
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F"""{text_of_1_token} {text_of_1_token}"""
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
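
# Added usage sketch (illustrative, not part of the original test file): a minimal
# standalone demonstration of the behaviour the assertions above encode, using the
# public `roberta-base` checkpoint. Requires network access, hence the guard.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("roberta-base", use_fast=True, add_prefix_space=True, trim_offsets=True)
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    # With trim_offsets=True the leading space is excluded from each token's span;
    # with trim_offsets=False the second span would start at index 5 instead of 6.
    assert enc.offset_mapping == [(0, 5), (6, 11)]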
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
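
# Added note (not in the original test): the subprocess above is roughly the same
# as launching the helper by hand, e.g. (paths illustrative):
#
#   python <test_dir>/xla_spawn.py --num_cores 8 <accelerate>/test_utils/scripts/test_script.py
#
# xla_spawn.py spawns one worker process per requested TPU core and runs the
# target script in each of them.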
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.
    """

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
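
# Added usage sketch (illustrative, not part of the original module): tiled VAE
# encoding/decoding through the public diffusers class this file implements.
# The checkpoint name and input size are examples only.
if __name__ == "__main__":
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()  # process large inputs tile-by-tile, blending the seams
    sample = torch.randn(1, 3, 1024, 1024)
    with torch.no_grad():
        latents = vae.encode(sample).latent_dist.sample()
        image = vae.decode(latents).sample
    print(image.shape)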
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
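
# Added usage sketch (illustrative, not part of the original module): constructing
# the config and a randomly initialised model from it, as is conventional for
# transformers configuration classes.
if __name__ == "__main__":
    from transformers import DecisionTransformerModel

    config = DecisionTransformerConfig(state_dim=17, act_dim=4, max_ep_len=4096)
    model = DecisionTransformerModel(config)
    print(model.config.n_layer)  # 3, the default set above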
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
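
# Added usage example (illustrative file names, not from the original script):
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./hf_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted
#
# The config at --config_path decides which head is attached: its `architectures`
# entry must end in ForSequenceClassification, ForAudioFrameClassification or
# ForXVector, matching the dispatch logic above.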
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
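
# Added note (illustrative, not part of the original module): the OnnxConfig above
# declares a single `pixel_values` input with fully dynamic axes, which the
# (now legacy) `transformers.onnx` exporter consumes, e.g.:
#
#   python -m transformers.onnx --model=facebook/data2vec-vision-base-ft ./onnx/
#
# `atol_for_validation` (1e-4) is the absolute tolerance used when comparing the
# exported graph's outputs against the PyTorch model during validation.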
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
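
# Added usage sketch (illustrative, not part of the original module): composing a
# DETA config with an explicit ResNet backbone instead of the default one.
if __name__ == "__main__":
    from transformers import DetaConfig, ResNetConfig

    backbone = ResNetConfig(out_features=["stage2", "stage3", "stage4"])
    config = DetaConfig(backbone_config=backbone, num_queries=900, two_stage=True)
    print(config.num_attention_heads)  # 8, aliased to encoder_attention_heads above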
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
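
# Added worked example (illustrative, not part of the original module): the
# `inputs_to_logits_ratio` property above multiplies the conv strides, so with
# the default (5, 2, 2, 2, 2, 2, 2) one logit frame covers 5 * 2**6 = 320 input
# samples, i.e. 20 ms of 16 kHz audio.
if __name__ == "__main__":
    config = WavLMConfig()
    assert config.inputs_to_logits_ratio == 320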
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Returns the sum of all the multiples of 3 or 5 below `n`.
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # note: this branch is unreachable, since any multiple of 15 already
            # matches the condition above; it is kept as in the source
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
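
# Added sanity check (not in the original file): for a limit of 10 the multiples
# of 3 or 5 below 10 are 3, 5, 6 and 9, which sum to 23.
if __name__ == "__main__":
    assert solution(10) == 23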
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
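
# Added usage sketch (illustrative, not part of the original test file): the
# in-graph tokenizer exercised above lets raw strings be tokenized inside a
# TensorFlow graph, e.g. for serving. Requires network access and keras-nlp.
if __name__ == "__main__":
    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    batch = tf.constant(["This is a straightforward English test sentence."])
    print(tf_tokenizer(batch)["input_ids"])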
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of almost-equilateral triangles with
    integral side lengths and area whose perimeters do not exceed `max_perimeter`.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
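
# Added sanity check (not in the original file): the first two almost-equilateral
# triangles with integral area are (5, 5, 6) and (17, 17, 16), with perimeters
# 16 and 50, so the sum up to a limit of 50 is 16 + 50 = 66.
if __name__ == "__main__":
    assert solution(16) == 16
    assert solution(50) == 66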
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
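
# Added usage sketch (illustrative, not part of the original module): bracketing
# a workload with the helpers above to report time plus CPU/GPU memory deltas.
if __name__ == "__main__":
    start = start_measure()
    _ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)  # any workload goes here
    measures = end_measure(start)
    log_measures(measures, "matmul benchmark")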
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None):
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
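
# Added usage sketch (illustrative, not part of the original module): running the
# processor on a stand-in image; the output shape comment assumes the default
# crop_size of 256x256 set above.
if __name__ == "__main__":
    processor = MobileViTImageProcessor()
    image = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a real image
    inputs = processor(images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # expected: (1, 3, 256, 256) with the defaults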
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A__ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class a ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self :Dict ):
        super().setUp()
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
        with open(self.monolingual_vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""" )
        tokenizer = BartphoTokenizer(A__ ,self.monolingual_vocab_file ,**self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self :Optional[Any] ,**kwargs :Dict ):
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self :List[str] ,tokenizer :List[Any] ):
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text
    def test_full_tokenizer( self :Tuple ):
        tokenizer = BartphoTokenizer(A__ ,self.monolingual_vocab_file ,**self.special_tokens_map )
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
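The id expectations in the test reduce to a plain dictionary lookup with an <unk> fallback; a self-contained sketch of that behaviour (ids mirror the fixture built in setUp above):

vocab = {'▁This': 4, '▁is': 5, '▁a': 6, '▁t': 7, 'est': 8}
unk_id = 3  # '▁l', 'à' and the literal '<unk>' token all map here
ids = [vocab.get(tok, unk_id) for tok in ['▁This', '▁is', '▁a', '▁l', 'à', '▁t', 'est', '<unk>']]
assert ids == [4, 5, 6, 3, 3, 7, 8, 3]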
| 252
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A__['''modeling_canine'''] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A__, module_spec=__spec__)
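A stripped-down sketch of the lazy-import pattern this init file relies on (the real _LazyModule also handles dir(), submodule access and import errors; this version only shows the attribute-to-submodule dispatch):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)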
| 252
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
        """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
        """content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class UpperCAmelCase_ ( TestCase ):
    '''simple docstring'''
    def test_make_duplicate_clusters( self ):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        """simple docstring"""
        ds = get_dataset()
        ds_dedup , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_dedup ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , True )
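For context, the similarity behind the clustering is MinHash-estimated Jaccard on whitespace tokens; a rough standalone sketch (assumes the `datasketch` package; 0.85 mirrors the threshold used above):

from datasketch import MinHash

def minhash_of(content):
    m = MinHash(num_perm=256)
    for token in content.split():
        m.update(token.encode('utf-8'))
    return m

m1, m2 = minhash_of('a ' * 20), minhash_of('a ' * 30)
print(m1.jaccard(m2))  # ~1.0: both documents reduce to the single token 'a'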
| 162
|
'''simple docstring'''
def A (number :int ):
    """
    Return the ``number``-th Catalan number (1-indexed, so A(1) == 1 and A(5) == 14),
    built iteratively from the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1).
    """
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
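# A quick self-check of the recurrence above against the closed form
# C(2(n - 1), n - 1) // n, which equals A(n) under this 1-based indexing
# (math.comb requires Python 3.8+):
import math
assert all(A(n) == math.comb(2 * (n - 1), n - 1) // n for n in range(1, 10))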
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SPIECE_UNDERLINE = """▁"""
class __magic_name__ ( PreTrainedTokenizer ):
UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        """simple docstring"""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        """simple docstring"""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
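A standalone sketch of the SentencePiece calls this tokenizer wraps ('spiece.model' is a placeholder path for a trained model, not shipped here):

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load('spiece.model')
pieces = sp.encode('This is a test', out_type=str)  # e.g. ['▁This', '▁is', '▁a', '▁t', 'est']
ids = [sp.PieceToId(p) for p in pieces]
print(sp.decode(pieces), ids)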
| 589
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1_0_0_2 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained( self ):
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
    @cached_property
    def big_tokenizer( self ):
        """simple docstring"""
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
    def test_picklable_without_disk( self ):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        """simple docstring"""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        """simple docstring"""
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
_lowerCAmelCase = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCAmelCase , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
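The `+ tokenizer.fairseq_offset` in the assertions above exists because the slow tokenizer reserves fairseq's special ids (<s>=0, <pad>=1, </s>=2, <unk>=3) ahead of the SentencePiece vocabulary. A sketch of the id shift, not the actual implementation:

def sp_id_to_tokenizer_id(sp_id, fairseq_offset=1, unk_id=3):
    # sp_id 0 is SentencePiece's own <unk>; everything else is shifted up
    return sp_id + fairseq_offset if sp_id else unk_id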
| 589
| 1
|
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
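# Worked example from the problem statement: COLIN is worth
# 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in sorted order,
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714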
if __name__ == "__main__":
print(solution())
| 307
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U] ):
    def __init__( self , key , val ) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
    def __repr__( self ) -> str:
        return (
            F'Node: key: {self.key}, val: {self.val}, '
            F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
        )
class DoubleLinkedList(Generic[T, U] ):
    def __init__( self ) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.head.next , self.rear.prev = self.rear, self.head
    def __repr__( self ) -> str:
        rep = ['''DoubleLinkedList''']
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add( self , node ) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self , node ) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U] ):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__( self , capacity ) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
    def __contains__( self , key ) -> bool:
        return key in self.cache
    def get( self , key ) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put( self , key , value ) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator( cls , size = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , '''cache_info''' , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
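With the names restored, the decorator interface can be exercised directly; a small memoised Fibonacci as an illustration:

@LRUCache.decorator(100)
def fib(num):
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with at most one call per argument
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)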
| 307
| 1
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self : int , **kwargs : Optional[Any] ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self : Dict ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self : Optional[int] ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self : str ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self : Dict ):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self : List[Any] ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self : List[Any] ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self : str ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self : Tuple ):
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def test_variance( self : Optional[Any] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
    def test_batch_step_no_noise( self : str ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1153.1833 ) < 1E-2
        assert abs(result_mean.item() - 0.5005 ) < 1E-3
    def test_full_loop_no_noise( self : List[str] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3
    def test_full_loop_with_v_prediction( self : str ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3
    def test_custom_timesteps( self : str ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def test_custom_timesteps_increasing_order( self : Optional[int] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self : List[str] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self : Union[str, Any] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
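The custom-timesteps tests at the end pin down a simple rule: "previous" means the next entry of the user-supplied descending list, and -1 once the list is exhausted. A sketch of that rule in isolation:

def previous_timestep(timesteps, t):
    idx = timesteps.index(t)
    return timesteps[idx + 1] if idx + 1 < len(timesteps) else -1

assert previous_timestep([100, 87, 50, 1, 0], 87) == 50
assert previous_timestep([100, 87, 50, 1, 0], 0) == -1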
| 352
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ) -> StableDiffusionPipeline:
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('.' )[0].split(lora_prefix_text_encoder + '_' )[-1].split('_' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.' )[0].split(lora_prefix_unet + '_' )[-1].split('_' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
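The weight update inside convert() is the usual LoRA merge W <- W + alpha * (up @ down); a tiny numeric illustration with made-up shapes:

import torch

W = torch.zeros(4, 4)
up, down = torch.randn(4, 2), torch.randn(2, 4)
W += 0.75 * torch.mm(up, down)  # rank-2 update scaled by alpha=0.75
print(torch.linalg.matrix_rank(W))  # 2: the merge can only add up to the LoRA rank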
| 352
| 1
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( OSError ):
    """simple docstring"""
    def __init__( self , lock_file ) -> None:
        """simple docstring"""
        self.lock_file = lock_file
        return None
    def __str__( self ) -> str:
        """simple docstring"""
        temp = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__( self , lock ) -> None:
        """simple docstring"""
        self.lock = lock
        return None
    def __enter__( self ):
        """simple docstring"""
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ) -> None:
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> None:
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        """simple docstring"""
        return self._lock_file
    @property
    def timeout( self ):
        """simple docstring"""
        return self._timeout
    @timeout.setter
    def timeout( self , value ) -> None:
        """simple docstring"""
        self._timeout = float(value )
        return None
    def _acquire( self ):
        """simple docstring"""
        raise NotImplementedError()
    def _release( self ):
        """simple docstring"""
        raise NotImplementedError()
    @property
    def is_locked( self ):
        """simple docstring"""
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        """simple docstring"""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ) -> None:
        """simple docstring"""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
        return None
    def __enter__( self ):
        """simple docstring"""
        self.acquire()
        return self
    def __exit__( self , exc_type , exc_value , traceback ) -> None:
        """simple docstring"""
        self.release()
        return None
    def __del__( self ) -> None:
        """simple docstring"""
        self.release(force=True )
        return None
    def hash_filename_if_too_long( self , path , max_length ) -> str:
        """simple docstring"""
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(path ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> None:
        """simple docstring"""
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
    def _acquire( self ) -> None:
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ) -> None:
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> None:
        """simple docstring"""
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ) -> None:
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ) -> None:
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    """simple docstring"""
    def _acquire( self ) -> None:
        """simple docstring"""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ) -> None:
        """simple docstring"""
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
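Typical use of the FileLock alias chosen above (works with whichever backend was selected):

lock = FileLock('demo.txt.lock', timeout=5)
with lock:
    pass  # exclusive section; a second process blocks here or raises Timeout after 5s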
| 694
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
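A quick sanity check on the channels_last conversion used above: it changes only the memory layout, never the values.

import torch

x = torch.randn(1, 3, 8, 8)
y = x.to(memory_format=torch.channels_last)
assert torch.equal(x, y)
assert y.is_contiguous(memory_format=torch.channels_last)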
| 694
| 1
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type , parquet_path , tmp_path ):
    """simple docstring"""
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
if split:
lowerCamelCase__: Any ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: str =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: List[str] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: Optional[Any] =Features({"image": Image()} )
lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
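# A quick round-trip sketch of the reader/writer pair exercised above (the file
# name "demo.parquet" and the toy columns are illustrative assumptions, not
# part of the test suite):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     ParquetDatasetWriter(ds, "demo.parquet").write()
#     reloaded = ParquetDatasetReader("demo.parquet", cache_dir="cache").read()
#     assert reloaded.column_names == ds.column_names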
| 59
|
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    StableDiffusionUpscalePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
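# Minimal usage sketch for the checkpoint exercised above; a CUDA device and
# the prompt string are illustrative assumptions, not part of the tests:
#
#     pipe = StableDiffusionUpscalePipeline.from_pretrained(
#         "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaled = pipe(prompt="a white cat", image=low_res_image).images[0]
#
# The output resolution is 4x the input, which is what the
# expected_height_width assertions above rely on.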
| 131
| 0
|
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(24n + 1)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Find the smallest difference of a pair of pentagonal numbers whose sum
    and difference are both pentagonal (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            pentagonal_sum = pentagonal_i + pentagonal_j
            pentagonal_diff = pentagonal_j - pentagonal_i
            if is_pentagonal(pentagonal_sum) and is_pentagonal(pentagonal_diff):
                return pentagonal_diff
    return -1
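# Worked example from the problem statement: P(4) = 22 and P(7) = 70 give the
# pentagonal sum 22 + 70 = 92 = P(8), but their difference 48 is not
# pentagonal, so that pair does not qualify.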
if __name__ == "__main__":
print(F'''{solution() = }''')
| 708
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    # Greedily concatenate consecutive examples until the token budget is hit.
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    # val/test are copied unchanged
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
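# Example invocation (paths are illustrative):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 128 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed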
| 234
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # torch.Generator(device="mps") is not supported, so seed the global RNG instead
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array([0.00039216] * 9)  # nine identical reference values
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
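# Note: the leading 20 in the asserted shapes above is the number of rendered
# frames per generated asset; frame_size controls the height/width of each frame.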
| 106
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
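# _LazyModule replaces this module object in sys.modules with a lazy proxy, so
# the torch/TF/Flax submodules listed in _import_structure are only imported on
# first attribute access instead of at package import time.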
| 170
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        # Point the checker at the temporary copy instead of the real source tree.
        check_copies.TRANSFORMERS_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMERS_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
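# convert_to_localized_md returns a (num_models_equal, converted_md_list) pair:
# a flag saying whether the localized README already lists every model, plus
# the regenerated localized model list used to patch it.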
| 105
|
def is_unique(input_str: str) -> bool:
    # Use one big integer as a bitmap: bit 2**ord(ch) is set once ch was seen.
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
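# Example: is_unique("abcde") is True, is_unique("abcda") is False. The bitmap
# is an arbitrary-precision int indexed by code point, so exotic characters
# still work, at the cost of very large intermediate integers.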
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105
| 1
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration and mobility,
    compute the missing one from sigma = q * n * mu (pass 0 for the unknown)."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""")
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""")
elif mobility < 0:
raise ValueError("""mobility cannot be negative""")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
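# Example: with electron_conc=2.5e19 (1/m^3) and mobility=0.1 (m^2/(V*s)),
# passing conductivity=0 returns ("conductivity", 0.1 * 2.5e19 * ELECTRON_CHARGE),
# i.e. sigma = q * n * mu.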
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position,
    replacing the differing bit with "_"; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until no more merges are possible; the
    strings that never merged are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # A successful merge (k is the merged pattern) marks both rows
                # and keeps the merged result for the next round.
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if string1 and string2 differ in exactly `count` positions
    (the number of "_" wildcards in the implicant)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily
    cover the remaining minterms."""
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one implicant mark that implicant as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover: repeatedly take the implicant covering the most minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
| 136
| 1
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with blocks of
    length >= 3 separated by at least one empty unit (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
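# ways_number[n] starts at 1 for the empty row; each (block_length, block_start)
# pair adds the fillings of the remainder after the block plus its mandatory
# one-unit gap, and the inner += 1 counts the block placed flush right.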
if __name__ == "__main__":
print(f'''{solution() = }''')
| 227
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 37
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: swap coordinates so x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    # TODO: fix the failing pipeline tests and remove this blanket skip
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            # Add dummy labels matching each task head's expected shape.
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
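# Sanity check on the asserted (1, 199, 768) shape: a 224x224 image with 16x16
# patches yields 196 patch tokens, plus 1 CLS token and the 2 text tokens fed
# above, for 199 positions in total.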
| 669
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ) -> None:
"""Convert an original XLM checkpoint into the Hugging Face Transformers format."""
chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )
state_dict = chkpt['model']
# We have the base model one level deeper than the original XLM repository
two_levels_state_dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
two_levels_state_dict[k] = v
else:
two_levels_state_dict["transformer." + k] = v
config = chkpt['params']
config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
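# The original XLM vocab marks non-final BPE subwords with a trailing "@@"; the HF convention instead
# appends "</w>" to word-final subwords (indices <= 13 are special tokens and are left unchanged).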
vocab = chkpt['dico_word2id']
vocab = {s + '</w>' if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(two_levels_state_dict , pytorch_weights_dump_path )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(config , indent=2 ) + "\n" )
print(F"Save vocab file to {pytorch_vocab_dump_path}" )
with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
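# Example invocation (script name and paths are illustrative):
#   python convert_xlm_checkpoint.py --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-converted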
| 704
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
"""simple docstring"""
size = size if size is not None else {"height": 224, "width": 224}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
image_inputs = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
image_inputs = []
for i in range(self.batch_size ):
width, height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
if torchify:
image_inputs = [torch.from_numpy(x ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest( ImageProcessingSavingTestMixin,unittest.TestCase ):
_A = ChineseCLIPImageProcessor if is_vision_available() else None
def setUp( self ):
"""simple docstring"""
self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , "do_resize" ) )
self.assertTrue(hasattr(image_processing , "size" ) )
self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
self.assertTrue(hasattr(image_processing , "center_crop" ) )
self.assertTrue(hasattr(image_processing , "do_normalize" ) )
self.assertTrue(hasattr(image_processing , "image_mean" ) )
self.assertTrue(hasattr(image_processing , "image_std" ) )
self.assertTrue(hasattr(image_processing , "do_convert_rgb" ) )
def test_image_processor_from_dict_with_kwargs( self ):
"""simple docstring"""
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def test_batch_feature( self ):
"""simple docstring"""
pass
def test_call_pil( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def test_call_numpy( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def test_call_pytorch( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels( ImageProcessingSavingTestMixin,unittest.TestCase ):
_A = ChineseCLIPImageProcessor if is_vision_available() else None
def setUp( self ):
"""simple docstring"""
self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
self.expected_encoded_image_num_channels = 3
@property
def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , "do_resize" ) )
self.assertTrue(hasattr(image_processing , "size" ) )
self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
self.assertTrue(hasattr(image_processing , "center_crop" ) )
self.assertTrue(hasattr(image_processing , "do_normalize" ) )
self.assertTrue(hasattr(image_processing , "image_mean" ) )
self.assertTrue(hasattr(image_processing , "image_std" ) )
self.assertTrue(hasattr(image_processing , "do_convert_rgb" ) )
def test_batch_feature( self ):
"""simple docstring"""
pass
def test_call_pil_four_channels( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 68
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest (unittest.TestCase ):
'''simple docstring'''
def setUp( self ):
self.checkpoint = """ZinengTang/tvlt-base"""
self.tmpdirname = tempfile.mkdtemp()
def get_image_processor( self ,**kwargs ):
return TvltImageProcessor.from_pretrained(self.checkpoint ,**kwargs )
def get_feature_extractor( self ,**kwargs ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**kwargs )
def tearDown( self ):
shutil.rmtree(self.tmpdirname )
def test_save_load_pretrained_default( self ):
image_processor = self.get_image_processor()
feature_extractor = self.get_feature_extractor()
processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
processor.save_pretrained(self.tmpdirname )
processor = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor ,TvltFeatureExtractor )
self.assertIsInstance(processor.image_processor ,TvltImageProcessor )
def test_feature_extractor( self ):
image_processor = self.get_image_processor()
feature_extractor = self.get_feature_extractor()
processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
audio = np.ones([12000] )
audio_dict = feature_extractor(audio ,return_tensors="""np""" )
input_processor = processor(audio=audio ,return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def test_image_processor( self ):
image_processor = self.get_image_processor()
feature_extractor = self.get_feature_extractor()
processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
image = np.ones([3, 224, 224] )
image_dict = image_processor(image ,return_tensors="""np""" )
input_processor = processor(images=image ,return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def test_processor( self ):
image_processor = self.get_image_processor()
feature_extractor = self.get_feature_extractor()
processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
audio = np.ones([12000] )
image = np.ones([3, 224, 224] )
inputs = processor(audio=audio ,images=image )
self.assertListEqual(list(inputs.keys() ) ,["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_model_input_names( self ):
image_processor = self.get_image_processor()
feature_extractor = self.get_feature_extractor()
processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" ,)
| 50
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case =StableDiffusionControlNetImgaImgPipeline
snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
snake_case =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
controlnet = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
components = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def test_attention_slicing_forward_pass( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def test_xformers_attention_forwardGenerator_pass( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def test_inference_batch_single_identical( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class MultiControlNetImgaImgPipelineFastTests( PipelineTesterMixin , PipelineLatentTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case =StableDiffusionControlNetImgaImgPipeline
snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
snake_case =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case =frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(m ):
if isinstance(m , torch.nn.Conv2d ):
torch.nn.init.normal_(m.weight )
m.bias.data.fill_(1.0 )
controlnet1 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet1.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
controlnet2 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
controlnet = MultiControlNetModel([controlnet1, controlnet2] )
components = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
]
image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def test_control_guidance_switch( self ):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe.to(torch_device )
scale = 10.0
steps = 4
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_1 = pipe(**inputs )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def test_attention_slicing_forward_pass( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def test_xformers_attention_forwardGenerator_pass( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def test_inference_batch_single_identical( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def test_save_pretrained_raise_not_implemented_exception( self ):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(tmpdir )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class ControlNetImgaImgPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_canny( self ):
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=None , controlnet=controlnet )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device="cpu" ).manual_seed(0 )
prompt = "evil space-punk bird"
control_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
image = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
output = pipe(
prompt , image , control_image=control_image , generator=generator , output_type="np" , num_inference_steps=50 , strength=0.6 , )
image = output.images[0]
assert image.shape == (512, 512, 3)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 408
| 0
|
'''simple docstring'''
import torch
def main() -> None:
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
else:
num_gpus = 0
print(f"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
main()
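# Illustrative usage (script name and output are examples only):
#   $ python count_gpus.py
#   Successfully ran on 2 GPUs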
| 708
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
"""simple docstring"""
deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **kwargs ) -> None:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
positive_arg = deprecated_arg[3:]
setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
logger.warning(
f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
f" {positive_arg}={kwargs[positive_arg]}" )
self.torchscript = kwargs.pop("torchscript" , self.torchscript )
self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
self.fp16_opt_level = kwargs.pop("fp16_opt_level" , self.fp16_opt_level )
super().__init__(**kwargs )
torchscript: bool = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
torch_xla_tpu_print_metrics: bool = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
fp16_opt_level: str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def _setup_devices( self ) -> Tuple["torch.device", int]:
"""simple docstring"""
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
device = torch.device("cpu" )
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
n_gpu = torch.cuda.device_count()
return device, n_gpu
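# The `device` and `n_gpu` properties below index into this cached tuple, so device discovery runs only once.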
@property
def is_tpu( self ) -> bool:
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def device_idx( self ) -> int:
"""simple docstring"""
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def device( self ) -> "torch.device":
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def n_gpu( self ) -> int:
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def is_gpu( self ) -> bool:
"""simple docstring"""
return self.n_gpu > 0
| 159
| 0
|
"""simple docstring"""
def remove_duplicates( key: str ) -> str:
'''Remove duplicate alphabetic characters from the key, preserving order.'''
key_no_dups = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
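# e.g. remove_duplicates("Hello World!!") -> "Helo Wrd" (spaces kept; repeats and punctuation dropped)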
def create_cipher_map( key: str ) -> dict[str, str]:
'''Return a substitution-cipher mapping built from the keyword.'''
alphabet = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
key = remove_duplicates(key.upper() )
offset = len(key )
# First fill cipher with key characters
cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(key ) , 26 ):
char = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
char = alphabet[i - offset]
cipher_alphabet[alphabet[i]] = char
return cipher_alphabet
def encipher( message: str , cipher_map: dict[str, str] ) -> str:
'''Encipher the message with the mapping produced by create_cipher_map.'''
return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message: str , cipher_map: dict[str, str] ) -> str:
'''Decipher a message by inverting the cipher map.'''
rev_cipher_map = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main() -> None:
'''simple docstring'''
message = input("Enter message to encode or decode: " ).strip()
key = input("Enter keyword: " ).strip()
option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
func = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
cipher_map = create_cipher_map(key )
print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 633
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
'''simple docstring'''
depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
use_conv_embed = True if "large" in model_name or "huge" in model_name else False
use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
use_layerscale = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
focal_levels = [3, 3, 3, 3]
focal_windows = [5, 5, 5, 5]
elif "fl4" in model_name:
focal_levels = [4, 4, 4, 4]
focal_windows = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
focal_windows = [3, 3, 3, 3]
if "lrf" in model_name:
focal_levels = [3, 3, 3, 3]
else:
focal_levels = [2, 2, 2, 2]
if "tiny" in model_name:
embed_dim = 96
elif "small" in model_name:
embed_dim = 96
elif "base" in model_name:
embed_dim = 128
elif "large" in model_name:
embed_dim = 192
elif "xlarge" in model_name:
embed_dim = 256
elif "huge" in model_name:
embed_dim = 352
# set label information
repo_id = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
filename = "imagenet-22k-id2label.json"
else:
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
config = FocalNetConfig(
embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
return config
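# e.g. "focalnet-tiny-lrf" -> depths [2, 2, 6, 2], focal_levels [3, 3, 3, 3], focal_windows [3, 3, 3, 3], embed_dim 96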
def rename_key( name ):
'''simple docstring'''
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
name = "encoder." + name
if "encoder.layers" in name:
name = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
name = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
name = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
name = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
name = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
name = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
name = "layernorm.weight"
if name == "norm.bias":
name = "layernorm.bias"
if "head" in name:
name = name.replace("head" , "classifier" )
else:
name = "focalnet." + name
return name
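# e.g. rename_key("layers.0.blocks.1.modulation.f.weight")
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"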
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
# fmt: off
model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
checkpoint_url = model_name_to_url[model_name]
print("Checkpoint URL: " , checkpoint_url )
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val
config = get_focalnet_config(model_name )
model = FocalNetForImageClassification(config )
model.eval()
# load state dict
model.load_state_dict(state_dict )
# verify conversion
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
processor = BitImageProcessor(
do_resize=True , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
image = Image.open(requests.get(url , stream=True ).raw )
inputs = processor(images=image , return_tensors="pt" )
image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
original_pixel_values = image_transforms(image ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
outputs = model(**inputs )
predicted_class_idx = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.id2label[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 633
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = StableDiffusionXLImgaImgPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
scheduler = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_2,
'''tokenizer_2''': tokenizer_2,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
image = image / 2 + 0.5
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def test_stable_diffusion_xl_img2img_euler( self ):
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = sd_pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def test_attention_slicing_forward_pass( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def test_save_load_optional_components( self ):
pass
def test_stable_diffusion_xl_img2img_prompt_embeds( self ):
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
sd_pipe = sd_pipe.to(torch_device )
sd_pipe.set_progress_bar_config(disable=None )
# forward without prompt embeds
inputs = self.get_dummy_inputs(torch_device )
negative_prompt = 3 * ['''this is a negative prompt''']
inputs['''negative_prompt'''] = negative_prompt
inputs['''prompt'''] = 3 * [inputs['''prompt''']]
output = sd_pipe(**inputs )
image_slice_1 = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
inputs = self.get_dummy_inputs(torch_device )
negative_prompt = 3 * ['''this is a negative prompt''']
prompt = 3 * [inputs.pop('''prompt''' )]
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
output = sd_pipe(
**inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
image_slice_2 = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
"""simple docstring"""
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
generator = torch.Generator(device=generator_device ).manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def test_stable_diffusion( self ):
pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 703
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
def check_results_dict_not_empty( self , results ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
result = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(result )
def test_inference_no_configs( self ):
MODEL_ID = '''sshleifer/tiny-gpt2'''
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
benchmark = PyTorchBenchmark(benchmark_args )
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Tuple )->Optional[Any]:
_UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Tuple )->str:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Tuple )->Any:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
_UpperCAmelCase = None
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def lowercase__ ( self : List[str] )->int:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Optional[int] )->Optional[Any]:
_UpperCAmelCase = '''sshleifer/tinier_bart'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = '''sshleifer/tinier_bart'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self : Optional[Any] )->int:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(__UpperCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(__UpperCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(__UpperCamelCase , '''env.csv''' ) , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''env.csv''' ) ).exists() )
def lowercase__ ( self : Tuple )->List[Any]:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__UpperCamelCase : str ):
self.assertTrue(hasattr(__UpperCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''current''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , '''log.txt''' ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''log.txt''' ) ).exists() )
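# A standalone sketch of the pattern the tests above exercise (hedged: assumes
# the tiny sshleifer/tiny-gpt2 checkpoint is reachable and that the installed
# transformers version still ships the PyTorchBenchmark utilities, which newer
# releases deprecate):
if __name__ == "__main__":
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    benchmark_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = PyTorchBenchmark(benchmark_args).run()
    print(results.time_inference_result)
    print(results.memory_inference_result)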
| 95
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ) -> None:
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip("""ImageGPT requires clusters at initialization""" )
    def test_init_without_params(self):
pass
def prepare_images():
    """Two test images from the hf-internal-testing fixtures dataset."""
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class _a (unittest.TestCase):
"""simple docstring"""
@slow
    def test_image_processor_integration(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
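# Minimal usage sketch (not part of the test suite; assumes network access to
# the openai/imagegpt-small checkpoint): the processor resizes, normalizes to
# [-1, 1], and maps each pixel to the id of its nearest color cluster, so the
# output is a sequence of token ids rather than pixel values.
if __name__ == "__main__":
    demo_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
    demo_image = Image.new("RGB", (64, 64), color=(128, 64, 32))
    demo_encoding = demo_processor(demo_image, return_tensors="pt")
    print(demo_encoding.input_ids.shape)  # torch.Size([1, 1024]): 32 * 32 tokens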
| 591
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector of real numbers; the name is taken from the annotations below."""
def __init__( self , A__ = None ) -> None:
if components is None:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = list(A__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A__ , self.__components ) ) + ")"
def __add__( self , A__ ) -> Vector:
_SCREAMING_SNAKE_CASE = len(self )
if size == len(A__ ):
_SCREAMING_SNAKE_CASE = [self.__components[i] + other.component(A__ ) for i in range(A__ )]
return Vector(A__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , A__ ) -> Vector:
_SCREAMING_SNAKE_CASE = len(self )
if size == len(A__ ):
_SCREAMING_SNAKE_CASE = [self.__components[i] - other.component(A__ ) for i in range(A__ )]
return Vector(A__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , A__ ) -> Vector:
...
@overload
def __mul__( self , A__ ) -> float:
...
def __mul__( self , A__ ) -> float | Vector:
if isinstance(A__ , (float, int) ):
_SCREAMING_SNAKE_CASE = [c * other for c in self.__components]
return Vector(A__ )
elif isinstance(A__ , A__ ) and len(self ) == len(A__ ):
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = [self.__components[i] * other.component(A__ ) for i in range(A__ )]
return sum(A__ )
else: # error case
raise Exception("""invalid operand!""" )
def UpperCamelCase ( self ) -> Vector:
return Vector(self.__components )
def UpperCamelCase ( self , A__ ) -> float:
if isinstance(A__ , A__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def UpperCamelCase ( self , A__ , A__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
_SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
_SCREAMING_SNAKE_CASE = [c**2 for c in self.__components]
return math.sqrt(sum(A__ ) )
def UpperCamelCase ( self , A__ , A__ = False ) -> float:
_SCREAMING_SNAKE_CASE = self * other
_SCREAMING_SNAKE_CASE = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension):
    """Returns the zero vector of the given dimension (name taken from the
    call inside Matrix.__mul__ below)."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension, pos):
    """Returns the unit basis vector with a 1 at index pos (0-indexed)."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar, x, y):
    """Computes scalar * x + y (the classic axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n, a, b):
    """Returns a vector of size n with random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A real matrix; the name is taken from the annotations below."""
def __init__( self , A__ , A__ , A__ ) -> None:
_SCREAMING_SNAKE_CASE = matrix
_SCREAMING_SNAKE_CASE = w
_SCREAMING_SNAKE_CASE = h
def __str__( self ) -> str:
_SCREAMING_SNAKE_CASE = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_SCREAMING_SNAKE_CASE = []
for i in range(self.__height ):
_SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] + other.component(A__ , A__ )
for j in range(self.__width )
]
matrix.append(A__ )
return Matrix(A__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , A__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_SCREAMING_SNAKE_CASE = []
for i in range(self.__height ):
_SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] - other.component(A__ , A__ )
for j in range(self.__width )
]
matrix.append(A__ )
return Matrix(A__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , A__ ) -> Matrix:
...
@overload
def __mul__( self , A__ ) -> Vector:
...
def __mul__( self , A__ ) -> Vector | Matrix:
if isinstance(A__ , A__ ): # matrix-vector
if len(A__ ) == self.__width:
_SCREAMING_SNAKE_CASE = zero_vector(self.__height )
for i in range(self.__height ):
_SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] * other.component(A__ )
for j in range(self.__width )
]
ans.change_component(A__ , sum(A__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(A__ , (int, float) ): # matrix-scalar
_SCREAMING_SNAKE_CASE = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A__ , self.__width , self.__height )
return None
def UpperCamelCase ( self ) -> int:
return self.__height
def UpperCamelCase ( self ) -> int:
return self.__width
def UpperCamelCase ( self , A__ , A__ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
_SCREAMING_SNAKE_CASE = value
else:
raise Exception("""change_component: indices out of bounds""" )
def UpperCamelCase ( self , A__ , A__ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
_SCREAMING_SNAKE_CASE = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A__ ) ):
_SCREAMING_SNAKE_CASE = minor[i][:y] + minor[i][y + 1 :]
return Matrix(A__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self , A__ , A__ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A__ , A__ )
else:
raise Exception("""Indices out of bounds""" )
def UpperCamelCase ( self ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_SCREAMING_SNAKE_CASE = [
self.__matrix[0][y] * self.cofactor(0 , A__ ) for y in range(self.__width )
]
return sum(A__ )
def square_zero_matrix(n):
    """Returns a square zero matrix of dimension n x n."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width, height, a, b):
    """Returns a width x height matrix with random integer components in [a, b]."""
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
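# Usage sketch for the two classes above (hedged): the dump anonymized most
# helper-method names (the repeated `UpperCamelCase` defs shadow one another)
# and left placeholders like `size` unresolved, so this demo assumes the
# de-anonymized implementation and sticks to the operator overloads, whose
# names survive verbatim:
if __name__ == "__main__":
    u = Vector([1.0, 2.0, 3.0])
    v = Vector([4.0, 5.0, 6.0])
    print(u + v)    # (5.0,7.0,9.0)
    print(u - v)    # (-3.0,-3.0,-3.0)
    print(u * 2.0)  # scalar multiple: (2.0,4.0,6.0)
    print(u * v)    # dot product: 32.0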
| 591
| 1
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        # half- and full-width spaces both map to <SP>
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
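# Usage sketch (hedged: assumes network access to the abeja/gpt-neox-japanese-2.7b
# checkpoint named in the maps above; GPTNeoXJapaneseTokenizer is this module's
# public entry point in transformers):
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    ids = tokenizer("こんにちは、世界。")["input_ids"]
    print(ids)
    print(tokenizer.decode(ids))  # should approximately round-trip the input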
| 134
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase : Optional[Any] = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One worker per list element; repeatedly trades values with its neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sorts `arr` by spawning one process per element."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
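# For reference, a sequential odd-even transposition sort (a sketch appended
# here, not part of the original script): the multiprocessing version above
# runs one process per element, but the alternating phase structure is the same.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1),(2,3),...; odd phases (1,2),(3,4),...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))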
| 134
| 1
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
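# Migration sketch: the deprecated class above is a thin alias, so callers can
# switch to the replacement with no behavior change (the checkpoint name below
# is the usual public CLIP checkpoint; network access assumed):
#
#   from transformers import CLIPImageProcessor
#
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")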
| 157
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
assert os.path.exists(UpperCamelCase )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
print(f"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
lowerCamelCase__ : str = basename(UpperCamelCase )
lowerCamelCase__ : str = dirname(UpperCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : Tuple = cls.hub_models()
lowerCamelCase__ : Optional[int] = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowerCamelCase__ : List[Any] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"using checkpoint {checkpoint_file}" )
lowerCamelCase__ : List[str] = hub_utils.from_pretrained(
UpperCamelCase , UpperCamelCase , UpperCamelCase , archive_map=UpperCamelCase , **UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = vars(chkpt["""args"""]["""model"""] )
lowerCamelCase__ : Union[str, Any] = args["""source_lang"""]
lowerCamelCase__ : Dict = args["""target_lang"""]
lowerCamelCase__ : List[Any] = dirname(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(UpperCamelCase )
# dicts
lowerCamelCase__ : int = os.path.join(UpperCamelCase , f"dict.{src_lang}.txt" )
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase , f"dict.{tgt_lang}.txt" )
lowerCamelCase__ : Any = Dictionary.load(UpperCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : Optional[Any] = len(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , """vocab-src.json""" )
print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Dict = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : Tuple = False
break
lowerCamelCase__ : Union[str, Any] = Dictionary.load(UpperCamelCase )
lowerCamelCase__ : List[str] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : str = len(UpperCamelCase )
lowerCamelCase__ : List[str] = os.path.join(UpperCamelCase , """vocab-tgt.json""" )
print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[str] = os.path.join(UpperCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , UpperCamelCase )
if os.path.exists(UpperCamelCase ):
break
with open(UpperCamelCase , encoding="""utf-8""" ) as fin:
lowerCamelCase__ : Dict = fin.read()
lowerCamelCase__ : Tuple = re.sub(r""" \d+$""" , """""" , UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"Generating {merges_file}" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as fout:
fout.write(UpperCamelCase )
# model config
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCamelCase , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
lowerCamelCase__ : str = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowerCamelCase__ : Dict = 5
lowerCamelCase__ : Optional[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : Any = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowerCamelCase__ : Union[str, Any] = 1.0
print(f"Generating {fsmt_model_config_file}" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# tokenizer config
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Any = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(f"Generating {fsmt_tokenizer_config_file}" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# model
lowerCamelCase__ : Tuple = chkpt["""models"""][0]
lowerCamelCase__ : Tuple = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : Tuple = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Tuple = FSMTConfig.from_pretrained(UpperCamelCase )
lowerCamelCase__ : List[str] = FSMTForConditionalGeneration(UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
# save
lowerCamelCase__ : Any = os.path.join(UpperCamelCase , UpperCamelCase )
print(f"Generating {pytorch_weights_dump_path}" )
torch.save(UpperCamelCase , UpperCamelCase )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f"cd {data_root}" )
print(f"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
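# Example invocation (a sketch: the script name matches this converter's file
# in transformers, and the paths are hypothetical; the checkpoint directory
# must contain the dicts and bpecodes referenced above):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ~/checkpoints/wmt19.ru-en/model4.pt \
#       --pytorch_dump_folder_path ~/dumps/wmt19-ru-en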
| 157
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Union[str, Any] = {'vocab_file': 'spm_char.model'}
A__ : Dict = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
A__ : Tuple = {
'microsoft/speecht5_asr': 10_24,
'microsoft/speecht5_tts': 10_24,
'microsoft/speecht5_vc': 10_24,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : Any, lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self : List[Any], lowerCamelCase : Dict ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__, out_type=UpperCamelCase__ )
def lowercase__ ( self : str, lowerCamelCase : Dict ):
'''simple docstring'''
return self.sp_model.piece_to_id(UpperCamelCase__ )
def lowercase__ ( self : int, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = self.sp_model.IdToPiece(UpperCamelCase__ )
return token
def lowercase__ ( self : List[Any], lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = []
lowercase__ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
lowercase__ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def lowercase__ ( self : int, lowerCamelCase : int, lowerCamelCase : str = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
UpperCamelCase__, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__, '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
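# Usage sketch (hedged: this module backs transformers' SpeechT5Tokenizer;
# assumes network access to the microsoft/speecht5_tts checkpoint listed in
# the map above, plus the sentencepiece dependency):
if __name__ == "__main__":
    from transformers import SpeechT5Tokenizer

    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tokenizer("Hello world")["input_ids"]
    # build_inputs_with_special_tokens appends </s>, so the last id is EOS
    print(ids[-1] == tokenizer.eos_token_id)  # True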
| 709
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least value of n for which the number of ways to fill a row of
    length n with blocks of length >= min_block_length first exceeds one
    million (Project Euler problem 115).
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"{solution() = }")
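# Sanity check (hedged, based on the Project Euler 115 statement, which gives
# F(3, 30) = 1,024,663 as the first fill count above one million for a minimum
# block length of 3):
#
#   assert solution(3) == 30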
| 671
| 0
|