| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
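
# A minimal usage sketch (an editor's addition, assuming the ViTMSNConfig class
# above and the standard PretrainedConfig API); unset arguments keep their defaults:
#
#   config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=768)
#   print(config.num_hidden_layers)  # 12, the default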
[code_codestyle: 358]
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        # seq_length * 3 because there are three modalities: states, returns and actions
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
[style_context_codestyle: 297 | label: 0]
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor

logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
[code_codestyle: 359]
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
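
# The _LazyModule above defers the heavy torch imports until a name is first
# accessed. A simplified sketch of the same idea using PEP 562 module-level
# __getattr__ (an editor's illustration, not the transformers implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, exported in _import_structure.items():
#           if name in exported:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")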
[style_context_codestyle: 297 | label: 0]
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean

def pi_estimator(iterations: int):
    """Estimates pi by throwing random darts at the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Checks the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    """The area under y = sqrt(4 - x^2) from 0 to 2 equals pi."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
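
# Example session (an editor's addition; outputs vary run to run, and the error
# shrinks roughly like 1/sqrt(iterations)):
#
#   pi_estimator(100_000)                         # dart-throwing estimate of pi
#   area_under_line_estimator_check(100_000)      # sanity check against y = x
#   pi_estimator_using_area_under_curve(100_000)  # pi via the quarter-circle integral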
[code_codestyle: 360]
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput

class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Improved Pseudo numerical methods for diffusion models (iPNDM)."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep (Adams-Bashforth) blending of the running values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
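
# The branching on len(self.ets) in `step` above is a classic Adams-Bashforth
# multistep ladder: with one history point it is Euler, with four it uses the
# AB4 weights 55/24, -59/24, 37/24, -9/24. A minimal self-contained sketch of
# the same blending on plain floats (an editor's addition; `blend` is a
# hypothetical helper name, not part of the scheduler API):

def blend(ets):
    # `ets` is a history list, newest last; returns the multistep combination.
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24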
[style_context_codestyle: 297 | label: 0]

def solution(n: int = 1000):
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
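
# For reference, the same problem has a closed-form answer via inclusion-exclusion
# on arithmetic series, running in O(1) instead of O(n). A sketch (an editor's
# addition; `solution_closed_form` is a hypothetical name, not from the source):

def solution_closed_form(n: int = 1000) -> int:
    # Sum of positive multiples of k below n: k * m * (m + 1) / 2 with m = (n - 1) // k.
    def series_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    # Inclusion-exclusion: multiples of 15 would otherwise be counted twice.
    return series_sum(3) + series_sum(5) - series_sum(15)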
[code_codestyle: 361]

def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
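
# The function above implements the standard Friedmann relation; written out
# (an editor's note, in LaTeX):
#
#   H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda}
#
# where \Omega_k = 1 - (\Omega_r + \Omega_m + \Omega_\Lambda) is the curvature
# density computed from the closure condition, exactly as in `curvature` above.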
[style_context_codestyle: 297 | label: 0]
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
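
# Usage sketch (an editor's addition; fetches the spiece.model referenced in
# PRETRAINED_VOCAB_FILES_MAP, so it needs network access):
#
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   ids = tokenizer.encode("Hello world")  # XLNet appends <sep> <cls> at the end
#   print(tokenizer.decode(ids))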
[code_codestyle: 362]
import sys

def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, sol = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(sol, 1, n - 1)


if __name__ == "__main__":
    main()
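
# Sanity check against the textbook CLRS example (an editor's addition): for the
# demo dimensions [30, 35, 15, 5, 10, 20, 25] the known optimum is 15125 scalar
# multiplications with parenthesization (( A1 ( A2 A3 )) (( A4 A5 ) A6 )):
#
#   matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
#   assert matrix[1][6] == 15125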
[style_context_codestyle: 297 | label: 0]
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)

def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config


def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict


def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)


def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)

    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
[code_codestyle: 363]

def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
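
# Because each pass halves the bracketing interval, the loop above needs about
# log2((b - a) / 0.01) iterations. A sketch of that bound (an editor's addition;
# `bisection_iterations` is a hypothetical helper name):

from math import ceil, log2


def bisection_iterations(a: float, b: float, tol: float = 0.01) -> int:
    # Number of halvings needed to shrink |b - a| below tol.
    return max(0, ceil(log2((b - a) / tol)))


# bisection_iterations(-2, 5) == 10, i.e. ~10 iterations for the first demo call.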
[style_context_codestyle: 297 | label: 0]
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
[code_codestyle: 364]
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
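
# Usage sketch (an editor's addition; "vocab.txt" is a hypothetical local file
# with one residue token per line). Since _tokenize is just text.split(), input
# sequences are expected to be whitespace-separated tokens:
#
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   print(tokenizer.tokenize("M K T A Y"))  # ['M', 'K', 'T', 'A', 'Y']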
[style_context_codestyle: 297 | label: 0]
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
[code_codestyle: 365]
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def A__ ( self , predictions , references , return_pvalue=False ) -> int:
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
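# A minimal reference sketch (not part of the metric above): Pearson's r computed
# directly from its definition, r = cov(x, y) / (std(x) * std(y)). The helper
# name `_pearson_r_reference` is hypothetical; it should agree with
# scipy.stats.pearsonr only up to floating-point error.
def _pearson_r_reference(predictions, references):
    import numpy as np

    x = np.asarray(predictions, dtype=float)
    y = np.asarray(references, dtype=float)
    xm, ym = x - x.mean(), y - y.mean()
    return float((xm * ym).sum() / np.sqrt((xm**2).sum() * (ym**2).sum()))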
| 297
| 0
|
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject ):
"""simple docstring"""
lowerCAmelCase__ = ["note_seq"]
    def __init__( self , *args , **kwargs ) -> str:
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
    def A__ ( cls , *args , **kwargs ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
    def A__ ( cls , *args , **kwargs ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
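# Note (an assumption about the surrounding library, not taken from this file):
# `DummyObject` placeholders like the class above let imports succeed when the
# optional `note_seq` dependency is missing, and only raise an informative
# ImportError once the object is actually instantiated or used, e.g.:
#
#     MidiProcessor()  # hypothetical name; raises "requires the note_seq library"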
| 366
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( TestCase ):
"""simple docstring"""
    def _create_example_records( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
        records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(records )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
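# Quick illustration of the first-record rule the tests above exercise (a sketch,
# not an additional test): Dataset.from_list infers the schema from the first
# record, so keys missing from it are dropped and keys missing from later
# records become None:
#
#     Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])[1]  # -> {"col_1": None}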
| 297
| 0
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> str:
'''simple docstring'''
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
        logging.set_verbosity(level_origin )
def A__ ( self ) -> int:
'''simple docstring'''
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def A__ ( self ) -> Any:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + "\n" )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 367
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
    def dummy_unet_condition( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 297
| 0
|
import random
def rabin_miller( num: int ) -> bool:
    '''simple docstring'''
    s = num - 1
    t = 0
    while s % 2 == 0:  # factor num - 1 as s * 2**t with s odd
        s = s // 2
        t += 1
    for _ in range(5 ):  # five random rounds of the Miller-Rabin witness test
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
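# Hedged aside (not part of the original script): for inputs below
# 3_317_044_064_679_887_385_961_981, testing against the first twelve primes as
# fixed bases is known to make Miller-Rabin deterministic, avoiding the small
# false-positive probability of the five random rounds above. The helper name
# `rabin_miller_deterministic` is hypothetical.
def rabin_miller_deterministic(num: int) -> bool:
    if num < 2:
        return False
    witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)
    for p in witnesses:
        if num % p == 0:
            return num == p
    s, t = num - 1, 0
    while s % 2 == 0:  # num - 1 = s * 2**t with s odd
        s //= 2
        t += 1
    for a in witnesses:
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False
    return True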
def is_prime_low_num( num: int ) -> bool:
    '''simple docstring'''
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime( keysize: int = 1024 ) -> int:
    '''simple docstring'''
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 368
|
def perfect( number: int ) -> bool:
    '''simple docstring'''
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
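# For example, perfect(6) is True because the proper divisors 1, 2 and 3 sum to 6,
# while perfect(12) is False since 1 + 2 + 3 + 4 + 6 = 16 != 12.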
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': 512,
    '''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-question_encoder-single-nq-base''': 512,
    '''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/dpr-reader-single-nq-base''': 512,
    '''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __lowerCamelCase ( BertTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = DPRContextEncoderTokenizer
class __lowerCamelCase ( BertTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
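# Hedged usage sketch (not taken from this file; `DPRReaderTokenizerFast` is the
# upstream transformers name and the checkpoint follows the maps above). Each
# resulting row is laid out as [CLS] question [SEP] title [SEP] text:
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(questions="What is love?", titles=("Haddaway",),
#                         texts=("'What Is Love' is a 1993 song...",), return_tensors="pt")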
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.'
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
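# Worked illustration of the span selection above (an illustration, not part of
# the original file): with max_answer_length=2,
#     start_logits = [0.1, 0.9, 0.2] and end_logits = [0.2, 0.8, 0.3]
# the best-scoring candidate is span (1, 1) with 0.9 + 0.8 = 1.7; any later
# candidate overlapping positions 1..1, such as (0, 1) or (1, 2), is skipped,
# so the next span kept is (2, 2).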
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __lowerCamelCase ( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = DPRReaderTokenizer
| 369
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ) -> Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ) -> List[str]:
        '''simple docstring'''
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> Optional[int]:
        '''simple docstring'''
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ) -> Optional[Any]:
        '''simple docstring'''
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> List[Any]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def A__ ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def A__ ( self ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> int:
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def A__ ( self ) -> Tuple:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def A__ ( self ) -> List[str]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
            outputs = model(**inputs )
# verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
| 297
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Dict:
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
        vocab = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )
    def get_image_processor( self , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def A__ ( self ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) -> int:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor )
def A__ ( self ) -> Dict:
'''simple docstring'''
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str , return_tensors="np" )
        encoded_tok = tokenizer(input_str , return_tensors="np" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def A__ ( self ) -> List[str]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ["""cat""", """nasa badge"""]
        inputs = processor(text=input_texts )
        seq_length = 16
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = [["""cat""", """nasa badge"""], ["""person"""]]
        inputs = processor(text=input_texts )
        seq_length = 16
        batch_size = len(input_texts )
        num_max_text_queries = max([len(texts ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def A__ ( self ) -> List[Any]:
'''simple docstring'''
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ["""cat""", """nasa badge"""]
        inputs = processor(text=input_texts )
        seq_length = 16
        input_ids = inputs["""input_ids"""]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def A__ ( self ) -> str:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input )
        self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def A__ ( self ) -> List[Any]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 370
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """gpt2""": 1024,
    """gpt2-medium""": 1024,
    """gpt2-large""": 1024,
    """gpt2-xl""": 1024,
    """distilgpt2""": 1024,
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
        input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids
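# Added usage sketch (not part of the original module). Assumptions: the class
# above mirrors transformers' GPT2TokenizerFast and the public "gpt2"
# checkpoint is reachable, so the demo goes through the library class.
if __name__ == "__main__":
    from transformers import GPT2TokenizerFast
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    enc = tok("Hello world")
    print(enc["input_ids"])  # e.g. [15496, 995]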
def floyd( n ):
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(" " , end="" )
        for _ in range(0 , i + 1 ):  # printing stars
            print("* " , end="" )
        print()
def reverse_floyd( n ):
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print("* " , end="" )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(" " , end="" )
def pretty_print( n ):
    '''simple docstring'''
    if n <= 0:
        print(" ... .... nothing printing :(" )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
SCREAMING_SNAKE_CASE__ = 1
while K:
SCREAMING_SNAKE_CASE__ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
SCREAMING_SNAKE_CASE__ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    '''simple docstring'''
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    '''simple docstring'''
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    '''simple docstring'''
    if split_mlp_wi:
        wi_a = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_b = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    '''simple docstring'''
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch( variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    '''simple docstring'''
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k , o , q , v = tax_attention_lookup(old , i , "encoder" , "attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi , wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
            lowercase_ = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
lowercase_ = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        lowercase_ = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        lowercase_ = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi , wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T
lowercase_ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict( converted_params , is_encoder_only: bool ):
    '''simple docstring'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    '''simple docstring'''
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    '''simple docstring'''
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
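# Added usage note (hedged): a typical invocation of this converter, with
# placeholder paths and a hypothetical script filename, would look like:
#   python convert_t5x_checkpoint.py --t5x_checkpoint_path /path/to/t5x_ckpt \
#       --config_file /path/to/config.json --pytorch_dump_path ./pytorch_model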
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCamelCase ( __lowercase , __lowercase ):
"""simple docstring"""
    @register_to_config
    def __init__( self , *, clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 768 , time_embed_dim: int , cross_attention_dim , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def A__ ( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
    '''simple docstring'''
    if __lowerCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(__lowerCamelCase , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(__lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
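# Added demo: population counts (number of set bits) for a few sample values.
if __name__ == "__main__":
    for sample in (0, 1, 7, 255):
        print(sample, "->", SCREAMING_SNAKE_CASE_(sample))  # 0, 1, 3, 8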
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class __lowerCamelCase ( lowercase__ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<sep>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<eop>", "<eod>"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowercase_ = 3
lowercase_ = do_lower_case
lowercase_ = remove_space
lowercase_ = keep_accents
lowercase_ = vocab_file
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
lowercase_ = jieba
lowercase_ = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model )
    def A__ ( self ) -> List[str]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Union[str, Any]:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def A__ ( self , inputs ) -> Any:
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def A__ ( self , UpperCAmelCase ) -> Dict:
        '''simple docstring'''
        text = self.preprocess_text(UpperCAmelCase )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.sp_model.PieceToId(lowercase_ )
def A__ ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase_ )
    def A__ ( self , UpperCAmelCase ) -> str:
        '''simple docstring'''
        out_string = "".join(UpperCAmelCase ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> Dict:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is not None:
return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1, 1]
return ([0] * len(lowercase_ )) + [1, 1]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def A__ ( self , save_directory , filename_prefix = None ) -> Union[str, Any]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
        '''simple docstring'''
        text = super()._decode(*UpperCAmelCase , **UpperCAmelCase )
        text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class __lowerCamelCase ( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels = None , out_channels = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim = None , attention_bias: bool = False , sample_size = None , activation_fn: str = "geglu" , norm_elementwise_affine: bool = True , double_self_attention: bool = True , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        '''simple docstring'''
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , channel , num_frames )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE_ ( vl: list , wt: list , w: int , n: int ):
    '''simple docstring'''
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
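# Added demo: a tiny fractional-knapsack instance. With capacity 50, items 1-2
# fit whole and two thirds of item 3 is taken, for a best value of 240.0.
if __name__ == "__main__":
    values = [60, 100, 120]
    weights = [10, 20, 30]
    print(SCREAMING_SNAKE_CASE_(values, weights, 50, 3))  # 240.0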
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    """simple docstring"""
    def process( self , sample: float ) -> float:
        '''simple docstring'''
        return 0.0
def get_bounds( fft_results: np.ndarray , samplerate: int ):
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type: FilterType , samplerate: int ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type: FilterType , samplerate: int ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
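# Added demo (assumption: a matplotlib backend is available). A pass-through
# filter satisfies the FilterType protocol; its magnitude response should be
# a flat 0 dB line and its phase shift should stay at zero.
if __name__ == "__main__":
    class PassThroughFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(PassThroughFilter(), 48000)
    show_phase_response(PassThroughFilter(), 48000)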
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def normalize_box( box , width , height ):
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def apply_tesseract( image , lang , tesseract_config = None ):
    '''simple docstring'''
    tesseract_config = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __lowerCamelCase ( BaseImageProcessor ):
"""simple docstring"""
lowerCAmelCase__ = ["pixel_values"]
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = "" , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase_ = size if size is not None else {'height': 224, 'width': 224}
lowercase_ = get_size_dict(snake_case__ )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = apply_ocr
lowercase_ = ocr_lang
lowercase_ = tesseract_config
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = None , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase_ = (size['height'], size['width'])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(snake_case__ )
lowercase_ = resample if resample is not None else self.resample
lowercase_ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase_ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase_ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase_ = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(snake_case__ ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
lowercase_ = []
lowercase_ = []
for image in images:
lowercase_ = apply_tesseract(snake_case__ , snake_case__ , snake_case__ )
words_batch.append(snake_case__ )
boxes_batch.append(snake_case__ )
if do_resize:
lowercase_ = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase_ = [flip_channel_order(snake_case__ ) for image in images]
lowercase_ = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowercase_ = BatchFeature(data={"pixel_values": images} , tensor_type=snake_case__ )
if apply_ocr:
lowercase_ = words_batch
lowercase_ = boxes_batch
return data
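# Added usage sketch (hedged): the class above appears to mirror transformers'
# LayoutLMv2 image processor, so the runnable example goes through the library
# class; OCR is disabled so pytesseract is not required.
if __name__ == "__main__":
    from transformers import LayoutLMv2ImageProcessor
    dummy_image = np.zeros((40, 60, 3), dtype=np.uint8)
    image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    encoding = image_processor(dummy_image, return_tensors="np")
    print(encoding["pixel_values"].shape)  # (1, 3, 224, 224)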
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ):
'''simple docstring'''
hf_model.apply_weight_norm()
lowercase_ = checkpoint["input_conv.weight_g"]
lowercase_ = checkpoint["input_conv.weight_v"]
lowercase_ = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
lowercase_ = checkpoint[F'upsamples.{i}.1.weight_g']
lowercase_ = checkpoint[F'upsamples.{i}.1.weight_v']
lowercase_ = checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
lowercase_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
lowercase_ = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
lowercase_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
lowercase_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
lowercase_ = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
lowercase_ = checkpoint["output_conv.1.weight_g"]
lowercase_ = checkpoint["output_conv.1.weight_v"]
lowercase_ = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["model"]["generator"] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
    def A__ ( self , device , seed=0 , pil_image=True ) -> Tuple:
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles( path: Path , articles: list ):
    '''simple docstring'''
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __lowerCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
    def run_eval_tester( self , model ) -> Any:
'''simple docstring'''
lowercase_ = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
lowercase_ = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
lowercase_ = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
lowercase_ = """translation_en_to_de""" if model == T5_TINY else """summarization"""
lowercase_ = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(SCREAMING_SNAKE_CASE_ , "argv" , SCREAMING_SNAKE_CASE_ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE_ ).exists()
# os.remove(Path(output_file_name))
    def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def A__ ( self , model ) -> int:
        '''simple docstring'''
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def A__ ( self , model ) -> Optional[Any]:
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        text = {
            """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
            """de""": [
                """Maschinelles Lernen ist großartig, oder?""",
                """Ich esse gerne Bananen""",
                """Morgen ist wieder ein toller Tag!""",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / "scores.json" )
        reference_path = str(tmp_dir / "val.target" )
        _dump_articles(input_file_name , text["en"] )
        _dump_articles(reference_path , text["de"] )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = F'\n run_eval_search.py\n {model}\n {str(input_file_name )}\n {str(output_file_name )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
        with patch.object(sys , "argv" , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [""" num_beams | length_penalty""", model, """Best score args"""]
        un_expected_strings = ["""Info"""]
        if "translation" in task:
            expected_strings.append("bleu" )
        else:
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
    """simple docstring"""
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ) -> None:
        '''simple docstring'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    """simple docstring"""
    def __init__( self , in_features , args_dim , domain_map , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x ) -> Tuple[torch.Tensor]:
        '''simple docstring'''
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    """simple docstring"""
    def __init__( self , function ) -> None:
        '''simple docstring'''
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        '''simple docstring'''
        return self.function(x , *args )
class DistributionOutput:
    """simple docstring"""
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim: int = 1 ) -> None:
        '''simple docstring'''
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ) -> Distribution:
        '''simple docstring'''
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None , ) -> Distribution:
        '''simple docstring'''
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
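        # Squareplus: a smooth softplus-like map onto the positive reals, x -> (x + sqrt(x**2 + 4)) / 2.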
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
            # Scaling a negative binomial shifts its logits by log(scale) (scaling property of the underlying Gamma mixture).
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 297
| 0
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCamelCase ( __UpperCamelCase ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
if tokenize_kwargs is None:
lowercase_ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
lowercase_ = truncation
lowercase_ = tokenize_kwargs
lowercase_ = {}
if return_tensors is not None:
lowercase_ = return_tensors
return preprocess_params, {}, postprocess_params
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.framework
lowercase_ = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
return model_inputs
def A__ ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = self.model(**UpperCAmelCase )
return model_outputs
def A__ ( self , UpperCAmelCase , UpperCAmelCase=False ) -> Tuple:
'''simple docstring'''
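        # With return_tensors=True, hand back the framework tensor; otherwise convert the first model output to nested Python lists.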
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
| 356
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = data
def __iter__( self ) -> List[str]:
'''simple docstring'''
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]=True ):
'''simple docstring'''
lowercase_ = Accelerator(even_batches=__lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: bool = False ):
'''simple docstring'''
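    # Build a map-style TensorDataset or an IterableDataset wrapper over range(dataset_size), then let the Accelerator prepare (shard) the dataloader.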
if iterable:
lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
else:
lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
lowercase_ = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = accelerator.prepare(__lowerCamelCase )
return dl
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , ):
'''simple docstring'''
lowercase_ = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCamelCase ):
lowercase_ = ddp_model(batch[0].float() )
lowercase_ = output.sum()
loss.backward()
batch_idxs.append(__lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = train_dl.batch_sampler.even_batches
lowercase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
lowercase_ = accelerator.state.distributed_type
lowercase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
lowercase_ = original_state
if __name__ == "__main__":
main()
| 297
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCamelCase ( __lowerCamelCase ):
"""simple docstring"""
@require_torch
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
lowercase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
lowercase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
lowercase_ = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(UpperCamelCase_ )
BertModel.from_pretrained(UpperCamelCase_ )
BertTokenizer.from_pretrained(UpperCamelCase_ )
pipeline(task="fill-mask" , model=UpperCamelCase_ )
# baseline - just load from_pretrained with normal network
lowercase_ = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
lowercase_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase_ = "1"
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
lowercase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
lowercase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
lowercase_ = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(UpperCamelCase_ )
BertModel.from_pretrained(UpperCamelCase_ )
BertTokenizer.from_pretrained(UpperCamelCase_ )
pipeline(task="fill-mask" , model=UpperCamelCase_ )
# baseline - just load from_pretrained with normal network
lowercase_ = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
lowercase_ = self.get_env()
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
lowercase_ = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
lowercase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
lowercase_ = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
lowercase_ = self.get_env()
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
lowercase_ = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase_ = "1"
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = "\nfrom transformers import pipeline\n "
lowercase_ = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
lowercase_ = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
lowercase_ = self.get_env()
lowercase_ = "1"
lowercase_ = [sys.executable, "-c", "\n".join([load, mock, run] )]
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = "\nfrom transformers import AutoModel\n "
lowercase_ = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
lowercase_ = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
lowercase_ = self.get_env()
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase_ = "1"
lowercase_ = subprocess.run(UpperCamelCase_ , env=UpperCamelCase_ , check=UpperCamelCase_ , capture_output=UpperCamelCase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 357
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        lowercase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        lowercase_ = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
        lowercase_ = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
        lowercase_ = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
        lowercase_ = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 297
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int=None , __lowerCamelCase: Union[str, Any]=None ):
'''simple docstring'''
if "." in tensor_name:
lowercase_ = tensor_name.split("." )
for split in splits[:-1]:
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
lowercase_ = new_module
lowercase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
lowercase_ = tensor_name in module._buffers
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
lowercase_ = False
lowercase_ = False
if is_buffer or not is_bitsandbytes_available():
lowercase_ = False
lowercase_ = False
else:
lowercase_ = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowercase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase_ = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase_ = value.to("cpu" )
                if value.dtype == torch.int8:
lowercase_ = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
                    if not is_8bit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
lowercase_ = torch.tensor(__lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , __lowerCamelCase ) and fp16_statistics is None:
lowercase_ = new_value.T
lowercase_ = old_value.__dict__
            if is_8bit:
                lowercase_ = bnb.nn.Int8Params(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
            elif is_4bit:
                lowercase_ = bnb.nn.Params4bit(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
lowercase_ = new_value
            if fp16_statistics is not None:
                setattr(module.weight , "SCB" , fp16_statistics.to(__lowerCamelCase ) )
else:
if value is None:
lowercase_ = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase_ = value.to(__lowerCamelCase )
else:
lowercase_ = torch.tensor(__lowerCamelCase , device=__lowerCamelCase )
if is_buffer:
lowercase_ = new_value
else:
lowercase_ = nn.Parameter(__lowerCamelCase , requires_grad=old_value.requires_grad )
lowercase_ = new_value
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict=None , __lowerCamelCase: Any=None , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Optional[int]=False ):
'''simple docstring'''
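    # Recursively walk the module tree and swap eligible linear layers for their bitsandbytes quantized equivalents.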
for name, module in model.named_children():
if current_key_name is None:
lowercase_ = []
current_key_name.append(__lowerCamelCase )
if (isinstance(__lowerCamelCase , nn.Linear ) or isinstance(__lowerCamelCase , __lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(__lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ , lowercase_ = module.weight.shape
else:
lowercase_ = module.in_features
lowercase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        lowercase_ = bnb.nn.Linear8bitLt(
                            __lowerCamelCase , __lowerCamelCase , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
lowercase_ = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            lowercase_ = bnb.nn.Linear4bit(
                                __lowerCamelCase , __lowerCamelCase , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
lowercase_ = True
# Store the module class in case we need to transpose the weight later
lowercase_ = type(__lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCamelCase )
if len(list(module.children() ) ) > 0:
lowercase_ , lowercase_ = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_been_replaced=__lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] , __lowerCamelCase: int=None , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: Optional[Any]=None ):
'''simple docstring'''
lowercase_ = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
lowercase_ , lowercase_ = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def SCREAMING_SNAKE_CASE_ ( *__lowerCamelCase: Dict , **__lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , __lowerCamelCase , )
return replace_with_bnb_linear(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( *__lowerCamelCase: str , **__lowerCamelCase: str ):
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , __lowerCamelCase , )
return set_module_quantized_tensor_to_device(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] ):
'''simple docstring'''
    lowercase_ = deepcopy(__lowerCamelCase ) # this has zero cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
lowercase_ = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(__lowerCamelCase , [] )
lowercase_ = len(__lowerCamelCase ) > 0
# Check if it is a base model
lowercase_ = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(__lowerCamelCase ) - set(__lowerCamelCase )
lowercase_ = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowercase_ = [".weight", ".bias"]
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(__lowerCamelCase , "" )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
| 358
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
        lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.float32 ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
        lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.float32 ).reshape(1 , 1 , 1 )
lowercase_ = state
        lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.float32 )
        lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.float32 )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
            torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.float32 ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
| 297
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ = 'yolos'
def __init__( self , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=[512, 864] , UpperCAmelCase=16 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=100 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**a_ )
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = qkv_bias
lowercase_ = num_detection_tokens
lowercase_ = use_mid_position_embeddings
lowercase_ = auxiliary_loss
# Hungarian matcher
lowercase_ = class_cost
lowercase_ = bbox_cost
lowercase_ = giou_cost
# Loss coefficients
lowercase_ = bbox_loss_coefficient
lowercase_ = giou_loss_coefficient
lowercase_ = eos_coefficient
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ = version.parse("1.11" )
@property
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return 1e-4
@property
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return 12
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 297
| 0
|
from __future__ import annotations
SCREAMING_SNAKE_CASE__ = []
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list[list[int]] , __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
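    # A placement is safe only if no queen already occupies the same row, column, or either upper diagonal.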
for i in range(len(lowerCAmelCase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowerCAmelCase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowerCAmelCase__ , -1 , -1 ) , range(lowerCAmelCase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowerCAmelCase__ , -1 , -1 ) , range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) ):
if board[i][j] == 1:
return False
return True
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list[list[int]] , __lowerCamelCase: int ):
'''simple docstring'''
if row >= len(lowerCAmelCase__ ):
solution.append(lowerCAmelCase__ )
printboard(lowerCAmelCase__ )
print()
return True
for i in range(len(lowerCAmelCase__ ) ):
if is_safe(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase_ = 1
solve(lowerCAmelCase__ , row + 1 )
lowercase_ = 0
return False
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list[list[int]] ):
'''simple docstring'''
for i in range(len(lowerCAmelCase__ ) ):
for j in range(len(lowerCAmelCase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 360
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
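            # Trigonometric schedule: betas = sin(pi * t / 2) ** 2, with alphas below chosen so that alphas**2 + betas**2 = 1.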
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
        lowercase_ = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
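        # Linear multistep update: Adams-Bashforth coefficients of increasing order (1, 2, 3, then 4) as the history of model outputs grows.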
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
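# Hypothetical sampling loop (hedged sketch: `scheduler`, `model` and `sample` are illustrative names, not defined in this snippet):
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample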
| 297
| 0
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any=0 ):
'''simple docstring'''
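    # Behavior follows the configured FSDP state dict type: FULL_STATE_DICT (rank 0 writes one file), LOCAL_STATE_DICT (one file per rank), SHARDED_STATE_DICT (a distributed checkpoint directory).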
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase_ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
lowercase_ = os.path.join(a__ , a__ )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(a__ , a__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase_ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
lowercase_ = os.path.join(a__ , a__ )
logger.info(F'Saving model to {output_model_file}' )
torch.save(a__ , a__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase_ = os.path.join(a__ , F'{MODEL_NAME}_{model_index}' )
os.makedirs(a__ , exist_ok=a__ )
logger.info(F'Saving model to {ckpt_dir}' )
lowercase_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=a__ , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Model saved to {ckpt_dir}' )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: str=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(a__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
lowercase_ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
lowercase_ = os.path.join(a__ , a__ )
logger.info(F'Loading model from {input_model_file}' )
lowercase_ = torch.load(a__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase_ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
lowercase_ = os.path.join(a__ , a__ )
logger.info(F'Loading model from {input_model_file}' )
lowercase_ = torch.load(a__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase_ = (
os.path.join(a__ , F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
lowercase_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=a__ , storage_reader=dist_cp.FileSystemReader(a__ ) , planner=DefaultLoadPlanner() , )
lowercase_ = state_dict["model"]
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(a__ )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: List[str]=0 ):
'''simple docstring'''
os.makedirs(a__ , exist_ok=a__ )
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase_ = FSDP.optim_state_dict(a__ , a__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase_ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
lowercase_ = os.path.join(a__ , a__ )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(a__ , a__ )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
lowercase_ = os.path.join(a__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(a__ , exist_ok=a__ )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(a__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: str=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase_ = None
            # the check below should work but currently doesn't (mostly a PyTorch issue);
            # in the meantime we disable it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase_ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
lowercase_ = os.path.join(a__ , a__ )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
lowercase_ = torch.load(a__ )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
lowercase_ = (
os.path.join(a__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
lowercase_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(a__ ) , )
lowercase_ = optim_state["optimizer"]
logger.info(F'Optimizer loaded from {ckpt_dir}' )
lowercase_ = FSDP.optim_state_dict_to_load(a__ , a__ , a__ )
optimizer.load_state_dict(a__ )
| 361
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for an FLRW universe with the given densities."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be non-negative")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # The curvature density follows from the requirement that all densities sum to one
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
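    # Sanity check (added; not part of the original demo): at redshift zero the
    # four density terms sum to one by construction, so H(0) must equal the
    # Hubble constant itself, up to floating-point rounding.
    assert (
        abs(
            hubble_parameter(
                hubble_constant=68.3,
                radiation_density=1e-4,
                matter_density=matter_density,
                dark_energy=1 - matter_density,
                redshift=0,
            )
            - 68.3
        )
        < 1e-9
    )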
| 297
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into the feature matrix and the target vector
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test folds
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
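# A minimal follow-up sketch (added for illustration; not part of the original
# script): once fitted, the classifier predicts integer class ids that can be
# mapped back to the iris species names via `target_names`.
def predict_species_example() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    classifier = xgboost(features, targets)
    predicted = classifier.predict(features[:1])  # class id of the first sample
    print(iris["target_names"][predicted[0]])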
| 362
|
import sys
def matrix_chain_order(array: list) -> tuple:
    n = len(array)
    # matrix[a][b] holds the minimum number of scalar multiplications needed to
    # compute the product of matrices a..b; sol stores the optimal split points
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution: list, i: int, j: int) -> None:
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main() -> None:
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
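# Worked example (added): for dimensions [10, 20, 30] there are two matrices,
# A1 (10x20) and A2 (20x30); the only parenthesization costs
# 10 * 20 * 30 = 6000 scalar multiplications, so matrix[1][n - 1] == 6000.
# >>> m, s = matrix_chain_order([10, 20, 30])
# >>> m[1][2]
# 6000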
| 297
| 0
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
SCREAMING_SNAKE_CASE__ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls) -> None:
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
    def test_push_to_hub(self) -> None:
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self) -> None:
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )
        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self) -> None:
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self) -> None:
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self) -> None:
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self) -> None:
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 363
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root inside it
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
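    # Sanity check (added; not part of the original demo): both brackets contain
    # the positive root of 10 - x * x, i.e. sqrt(10) ~ 3.1623, and the loop
    # stops once the bracket is narrower than 0.01, so the result is accurate
    # to roughly two decimal places.
    import math

    assert abs(bisection(-2, 5) - math.sqrt(10)) < 0.01
    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01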
| 297
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 364
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self) -> dict:
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> list:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix=None) -> tuple:
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
| 297
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Map utf-8 bytes to printable unicode strings, avoiding whitespace/control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
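# Illustration (added): bytes without a printable representation are remapped
# to code points above 255 so that every byte has a visible stand-in. The
# space byte (32) becomes "Ġ" (chr(288)), which is why GPT-2/BART BPE
# vocabularies are full of "Ġ"-prefixed tokens marking word boundaries.
# >>> bytes_to_unicode()[ord(" ")]
# 'Ġ'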
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
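# Example (added): for the symbol tuple ("l", "o", "w") this returns
# {("l", "o"), ("o", "w")}; the BPE loop below repeatedly merges whichever
# adjacent pair has the lowest merge rank until no known merge remains.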
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> list:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: list) -> str:
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> tuple:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> list:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> list:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs) -> tuple:
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 365
|
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False) -> dict:
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 297
| 0
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn at random."""
    total = math.comb(NUM_BALLS, taken)
    # Ways to draw `taken` balls while completely missing one given colour
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
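# Why this works (added note): by linearity of expectation, the expected number
# of distinct colours equals NUM_COLOURS * P(a given colour appears at least
# once). A colour is entirely absent exactly when all 20 balls come from the
# other 60, so P(absent) = C(60, 20) / C(70, 20) and
# E = 7 * (1 - C(60, 20) / C(70, 20)), which is what `solution` computes.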
| 366
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self) -> list:
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self) -> Dataset:
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self) -> None:
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self) -> None:
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self) -> None:  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self) -> None:  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self) -> None:
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
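# A minimal usage sketch (added; mirrors what the tests above exercise):
# Dataset.from_list infers the schema from the first record.
# >>> from datasets import Dataset
# >>> Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]).column_names
# ['col_1', 'col_2']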
| 297
| 0
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
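    # Quick check (added; not part of the original file): the digest of the
    # canonical "hello world" test vector should match hashlib's MD5.
    assert md5_me(b"hello world") == b"5eb63bbbe01eeed093cb22bb8f5acdc3"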
| 367
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self) -> UNet2DModel:
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self) -> UNet2DConditionModel:
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self) -> tuple:
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self) -> None:
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 297
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self) -> dict:
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0) -> dict:
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self) -> None:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self) -> None:
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self) -> None:
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self) -> None:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 368
|
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("Enter number: ").strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297
| 0
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ = "pytorch_model.bin"
@dataclasses.dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
lowerCAmelCase__ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "A csv or a json file containing the validation data."} )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "The name of the task to train on."} , )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
lowerCAmelCase__ = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
lowerCAmelCase__ = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
lowerCAmelCase__ = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowerCAmelCase__ = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
lowerCAmelCase__ = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
lowerCAmelCase__ = dataclasses.field(
default=1_00 , metadata={"help": "Maximum number of self-training iterations."} , )
lowerCAmelCase__ = dataclasses.field(
default=snake_case_ , metadata={"help": "Random seed for initialization."} , )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
lowercase_ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowercase_ = dataset.filter(lambda __lowerCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowercase_ = int(eval_result * len(_snake_case ) )
print(_snake_case )
lowercase_ = dataset.sort("probability" , reverse=_snake_case )
lowercase_ = dataset.select(range(_snake_case ) )
lowercase_ = dataset.remove_columns(["label", "probability"] )
lowercase_ = dataset.rename_column("prediction" , "label" )
lowercase_ = dataset.map(lambda __lowerCamelCase : {"label": idalabel[example["label"]]} )
lowercase_ = dataset.shuffle(seed=args.seed )
lowercase_ = os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case , index=_snake_case )
else:
dataset.to_json(_snake_case )
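# Illustration of the confidence filter used above, with the real `datasets`
# API and a placeholder threshold:
#   import datasets
#   ds = datasets.Dataset.from_dict({"prediction": [0, 1], "probability": [0.4, 0.9]})
#   ds = ds.filter(lambda example: example["probability"] > 0.8)  # keeps only row 1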
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , **__lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowercase_ = STModelArguments(model_name_or_path=_snake_case )
lowercase_ = STDataArguments(train_file=_snake_case , infer_file=_snake_case )
lowercase_ = STTrainingArguments(output_dir=_snake_case )
lowercase_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
lowercase_ = {}
lowercase_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowercase_ = args.train_file
lowercase_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowercase_ = args.eval_file
for key in data_files:
lowercase_ = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
lowercase_ = extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
lowercase_ = F'{args.output_dir}/self-train_iter-{{}}'.format
lowercase_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
lowercase_ = None
lowercase_ = None
lowercase_ = 0
lowercase_ = False
# Show the progress bar
lowercase_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowercase_ = data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowercase_ = os.path.join(_snake_case , "stage-1" )
lowercase_ = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
lowercase_ = os.path.join(_snake_case , "best-checkpoint" , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , _snake_case , _snake_case , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 1." , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowercase_ = os.path.join(_snake_case , "best-checkpoint" )
lowercase_ = os.path.join(_snake_case , "stage-2" )
# Update arguments_dict
lowercase_ = model_path
lowercase_ = data_files["train"]
lowercase_ = current_output_dir
lowercase_ = os.path.join(_snake_case , "best-checkpoint" , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , _snake_case , _snake_case , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 2." , _snake_case )
lowercase_ = iteration
lowercase_ = data_dir_format(iteration + 1 )
lowercase_ = AutoConfig.from_pretrained(os.path.join(_snake_case , "best-checkpoint" ) )
lowercase_ = config.idalabel
lowercase_ = os.path.join(_snake_case , "eval_results_best-checkpoint.json" )
lowercase_ = os.path.join(_snake_case , "test_results_best-checkpoint.json" )
assert os.path.exists(_snake_case )
with open(_snake_case , "r" ) as f:
lowercase_ = float(json.load(_snake_case )[args.eval_metric] )
lowercase_ = os.path.join(_snake_case , "infer_output_best-checkpoint.csv" )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
lowercase_ = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
lowercase_ = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
lowercase_ = os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowercase_ = eval_result
if best_iteration is None:
lowercase_ = new_iteration
lowercase_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowercase_ = new_iteration
lowercase_ = new_eval_result
lowercase_ = 0
else:
if new_eval_result == best_eval_result:
lowercase_ = new_iteration
lowercase_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowercase_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , _snake_case )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , "eval_results_best-iteration.json" ) , )
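# Hypothetical driver call for the self-training loop above (the entry point
# is obfuscated as SCREAMING_SNAKE_CASE_ in this dump; all file names below
# are placeholders):
#
#   SCREAMING_SNAKE_CASE_(
#       "bert-base-uncased",    # model name or path
#       "train.csv",            # labeled training data
#       "infer.csv",            # unlabeled data to pseudo-label
#       "./self-train-output",  # output directory
#       eval_file="eval.csv",
#       evaluation_strategy="epoch",
#   )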
| 369
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
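# Minimal inference sketch mirroring the integration test above (the classes
# and the "microsoft/focalnet-tiny" checkpoint are the real ones used there):
#   from PIL import Image
#   from transformers import AutoImageProcessor, FocalNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
#   model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(dim=-1).item()  # 281 = tabby cat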
| 297
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] ):
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowercase_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowercase_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowercase_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowercase_ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowercase_ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase_ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowercase_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowercase_ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowercase_ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowercase_ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowercase_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowercase_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowercase_ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowercase_ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowercase_ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowercase_ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowercase_ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowercase_ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase_ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowercase_ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowercase_ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowercase_ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: Any ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase_ = orig_state_dict.pop(__lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase_ = key.split("." )
lowercase_ , lowercase_ = int(key_split[2] ), int(key_split[4] )
lowercase_ = config.vision_config.hidden_size
if "weight" in key:
lowercase_ = val[:dim, :]
lowercase_ = val[dim : dim * 2, :]
lowercase_ = val[-dim:, :]
else:
lowercase_ = val[:dim]
lowercase_ = val[dim : dim * 2]
lowercase_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase_ = key.split("." )
lowercase_ = int(key_split[3] )
lowercase_ = config.text_config.hidden_size
if "weight" in key:
lowercase_ = val[:dim, :]
lowercase_ = val[
dim : dim * 2, :
]
lowercase_ = val[-dim:, :]
else:
lowercase_ = val[:dim]
lowercase_ = val[dim : dim * 2]
lowercase_ = val[-dim:]
else:
lowercase_ = rename_key(__lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase_ = val.squeeze_()
else:
lowercase_ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple="groupvit-gcc-yfcc" , __lowerCamelCase: List[Any]=False ):
'''simple docstring'''
lowercase_ = GroupViTConfig()
lowercase_ = GroupViTModel(__lowerCamelCase ).eval()
lowercase_ = torch.load(__lowerCamelCase , map_location="cpu" )["model"]
lowercase_ = convert_state_dict(__lowerCamelCase , __lowerCamelCase )
lowercase_ = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__lowerCamelCase ) == 0)
# verify result
lowercase_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowercase_ = prepare_img()
lowercase_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
with torch.no_grad():
lowercase_ = model(**__lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowercase_ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase_ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , __lowerCamelCase , atol=1E-3 )
processor.save_pretrained(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
print("Successfully saved processor and model to" , __lowerCamelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(__lowerCamelCase , organization="nielsr" )
model.push_to_hub(__lowerCamelCase , organization="nielsr" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
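# Example invocation of the conversion script above (flags come from the
# argparse definition; paths are placeholders):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc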
| 370
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
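# Usage sketch for the fast tokenizer defined above (exported by transformers
# as GPT2TokenizerFast):
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   tok("Hello world")["input_ids"]  # [15496, 995]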
| 297
| 0
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = CodeGenTokenizer
lowerCAmelCase__ = CodeGenTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = {"add_prefix_space": True}
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
lowercase_ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
lowercase_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase_ = {"""unk_token""": """<unk>"""}
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case_ ) )
def A__ ( self , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , **UpperCAmelCase ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = """lower newer"""
lowercase_ = """lower newer"""
return input_text, output_text
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ = """lower newer"""
lowercase_ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase_ = tokenizer.tokenize(snake_case_ , add_prefix_space=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase_ = tokens + [tokenizer.unk_token]
lowercase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def A__ ( self ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer(add_prefix_space=snake_case_ )
lowercase_ = """lower newer"""
# Testing tokenization
lowercase_ = tokenizer.tokenize(snake_case_ , add_prefix_space=snake_case_ )
lowercase_ = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
lowercase_ = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
lowercase_ = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
lowercase_ = self.get_rust_tokenizer(add_prefix_space=snake_case_ )
lowercase_ = tokenizer.encode(snake_case_ , add_prefix_space=snake_case_ )
lowercase_ = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing the unknown token
lowercase_ = tokens + [rust_tokenizer.unk_token]
lowercase_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
'''simple docstring'''
pass
def A__ ( self , UpperCAmelCase=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase_ = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
# Simple input
lowercase_ = """This is a simple input"""
lowercase_ = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase_ = ("""This is a simple input""", """This is a pair""")
lowercase_ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="max_length" )
# Simple input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="max_length" )
# Simple input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="max_length" , )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="max_length" )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="max_length" )
# Pair input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="max_length" , )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase_ = """This is a simple input"""
lowercase_ = ["""This is a simple input looooooooong""", """This is a simple input"""]
lowercase_ = ("""This is a simple input""", """This is a pair""")
lowercase_ = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
lowercase_ = tokenizer.pad_token_id
lowercase_ = tokenizer(snake_case_ , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase_ = tokenizer(snake_case_ , padding=snake_case_ , truncate=snake_case_ , return_tensors="np" )
lowercase_ = tokenizer(*snake_case_ , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase_ = tokenizer(snake_case_ , padding=snake_case_ , truncate=snake_case_ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = """$$$"""
lowercase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case_ , add_bos_token=snake_case_ )
lowercase_ = """This is a simple input"""
lowercase_ = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase_ = tokenizer.bos_token_id
lowercase_ = tokenizer(snake_case_ )
lowercase_ = tokenizer(snake_case_ )
self.assertEqual(out_s.input_ids[0] , snake_case_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase_ = tokenizer.decode(out_s.input_ids )
lowercase_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , snake_case_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowercase_ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
lowercase_ = """\nif len_a > len_b: result = a\nelse: result = b"""
lowercase_ = tokenizer.encode(snake_case_ )
lowercase_ = ["""^#""", re.escape("<|endoftext|>" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
lowercase_ = tokenizer.decode(snake_case_ , truncate_before_pattern=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> List[str]:
'''simple docstring'''
pass
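# The truncate_before_pattern behavior exercised in the slow test above is
# real CodeGen tokenizer API: decode() cuts the text at the first match of
# any of the given regexes, e.g. at a trailing comment or docstring opener:
#   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   tok.decode(generated_ids, truncate_before_pattern=["^#", "^'''", "\n\n\n"])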
| 371
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Any="attention" ):
'''simple docstring'''
lowercase_ = lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowercase_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowercase_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowercase_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowercase_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
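# Shape note for the lookup above (an observation, not original code): the T5X
# kernels carry a per-layer axis, so [:, i, :, :] selects layer i, and the two
# reshapes fuse the per-head axes into the 2-D (in_features, out_features)
# layout that the PyTorch attention modules expect; the caller transposes.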
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=False ):
'''simple docstring'''
if split_mlp_wi:
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowercase_ = (wi_a, wi_a)
else:
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowercase_ = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: dict , *, __lowerCamelCase: int , __lowerCamelCase: bool , __lowerCamelCase: bool = False ):
'''simple docstring'''
lowercase_ = traverse_util.flatten_dict(variables["target"] )
lowercase_ = {"/".join(__lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase_ = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __lowerCamelCase )
lowercase_ = collections.OrderedDict()
# Shared embeddings.
lowercase_ = old["token_embedder/embedding"]
# Encoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , __lowerCamelCase , "encoder" ).T
lowercase_ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "encoder" ).T
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "self_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (Cross Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "encoder_decoder_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 2 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T
lowercase_ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ = old["decoder/logits_dense/kernel"].T
return new
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: bool ):
'''simple docstring'''
lowercase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase_ = state_dict["shared.weight"]
return state_dict
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = checkpoints.load_tax_checkpoint(__lowerCamelCase )
lowercase_ = convert_tax_to_pytorch(
__lowerCamelCase , num_layers=config.num_layers , is_encoder_only=__lowerCamelCase , scalable_attention=__lowerCamelCase )
lowercase_ = make_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , ):
'''simple docstring'''
lowercase_ = MTaConfig.from_json_file(__lowerCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase_ = UMTaEncoderModel(__lowerCamelCase )
else:
lowercase_ = UMTaForConditionalGeneration(__lowerCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowerCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(__lowerCamelCase )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
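# Example invocation (flags mirror the argparse definition above; paths are
# placeholders):
#   python convert_umt5_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention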
| 297
| 0
|
"""simple docstring"""
def solution( __lowerCamelCase: int = 100 ):
    '''simple docstring'''
    collect_powers = set()
    n = __lowerCamelCase + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
    print("""Number of terms """, solution(int(str(input()).strip())))
| 350
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
    '''simple docstring'''
    if not isinstance(__lowerCamelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
    elif __lowerCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(__lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
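# Quick example for the popcount helper above: bin(25) == '0b11001' contains
# three '1' characters, so the function returns 3.
#   >>> SCREAMING_SNAKE_CASE_(25)
#   3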
| 297
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
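# Worked example (a sketch; fairseq dict files hold one "word count" pair per line):
# a dict.txt containing the two lines "hello 42" and "world 7" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.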
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
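# Example invocation, as a sketch only -- the script name and all local paths
# are hypothetical placeholders:
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /tmp/wav2vec2_seq2seq.pt \
#       --pytorch_dump_folder_path /tmp/wav2vec2-s2t2 \
#       --dict_path /tmp/dict.txt
#
# The encoder/decoder config arguments default to public Hub checkpoints, so
# only the three paths above are strictly required.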
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel: `sample` holds the transformed hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer block applied along the temporal (frame) axis of video-like latents."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ) -> TransformerTemporalModelOutput:
        # 1. Input: split the fused (batch * frames) axis so attention mixes across frames
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output: undo the reshape and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
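# Minimal shape check, as a sketch; it assumes the class names reconstructed
# above and an installed `diffusers`:
#
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=4)
#   hidden = torch.randn(2 * 4, 4, 8, 8)      # (batch * num_frames, C, H, W)
#   out = model(hidden, num_frames=4).sample  # the residual connection preserves the shape
#   assert out.shape == hidden.shape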
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
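# Run sketch (assumes this file sits at tests/models/nezha/test_modeling_nezha.py
# inside a transformers checkout; the path is an assumption):
#
#   python -m pytest tests/models/nezha/test_modeling_nezha.py -k "test_model" -v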
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency (magnitude) response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_angle = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_angle, -2 * pi))
    plt.show()
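# Usage sketch: anything satisfying the FilterType protocol can be plotted.
# The trivial pass-through filter below is an assumption for demonstration only;
# its impulse response is a unit impulse, giving a flat 0 dB / 0 rad response.
#
#   class Passthrough:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(Passthrough(), 48000)
#   show_phase_response(Passthrough(), 48000)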
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
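    # Follow-up sketch (uses only the variables defined above): predictions stay in
    # the scaler's [0, 1] range, so they can be scored against y_test directly,
    # e.g. with a windowed RMSE:
    #
    #   rmse = np.sqrt(((pred - y_test) ** 2).mean())
    #   print(f"RMSE on the scaled test windows: {rmse:.4f}")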
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
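# Usage sketch (assumes network access to the Hub checkpoint named above):
#
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   enc = tokenizer("MVP is a multi-task pre-trained model.")
#   # enc["input_ids"] starts with bos (<s>) and ends with eos (</s>), matching
#   # build_inputs_with_special_tokens above.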
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
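# Run sketch -- the test-file path is an assumption about the repository layout;
# the slow/integration tests additionally need a CUDA GPU and Hub access:
#
#   python -m pytest tests/pipelines/stable_unclip/test_stable_unclip_img2img.py -k "not slow" -v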
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
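# Usage sketch: the ONNX config above fixes the export IO names and tolerance.
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs
#   assert onnx_config.atol_for_validation == 1e-4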
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
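# Usage sketch: project hidden features to distribution parameters, then build a
# (possibly affine-transformed) distribution and score observations with it.
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=16)
#   distr_args = proj(torch.randn(8, 16))          # (df, loc, scale) tensors
#   distr = output.distribution(distr_args)
#   nll = -distr.log_prob(torch.randn(8))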
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = data
def __iter__( self ) -> List[str]:
'''simple docstring'''
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]=True ):
'''simple docstring'''
lowercase_ = Accelerator(even_batches=__lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: bool = False ):
'''simple docstring'''
if iterable:
lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
else:
lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
lowercase_ = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = accelerator.prepare(__lowerCamelCase )
return dl
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , ):
'''simple docstring'''
lowercase_ = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCamelCase ):
lowercase_ = ddp_model(batch[0].float() )
lowercase_ = output.sum()
loss.backward()
batch_idxs.append(__lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = train_dl.batch_sampler.even_batches
lowercase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
lowercase_ = accelerator.state.distributed_type
lowercase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
lowercase_ = original_state
if __name__ == "__main__":
main()
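# Hedged usage sketch for the behaviour exercised above. The helper is
# hypothetical and mirrors this file's create_accelerator/create_dataloader
# names; it shows the intended call pattern when even_batches is disabled and
# processes therefore receive uneven numbers of batches.
def _example_uneven_join(accelerator, model, dataloader):
    # join_uneven_inputs keeps DDP gradient syncs from deadlocking once the
    # process with fewer batches runs out of data.
    with accelerator.join_uneven_inputs([model]):
        for (batch,) in dataloader:
            loss = model(batch.float()).sum()
            loss.backward()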
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __lowerCamelCase ( __lowerCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 65536 , UpperCAmelCase = None , UpperCAmelCase = 2 , UpperCAmelCase = 2 , UpperCAmelCase = 0 , UpperCAmelCase = "fourier" , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = 0.0 , UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase = "UNetMidBlock1D" , UpperCAmelCase = None , UpperCAmelCase = (32, 32, 64) , UpperCAmelCase = None , UpperCAmelCase = 8 , UpperCAmelCase = 1 , UpperCAmelCase = False , ) -> int:
'''simple docstring'''
super().__init__()
lowercase_ = sample_size
# time
if time_embedding_type == "fourier":
lowercase_ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
lowercase_ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase_ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
lowercase_ = block_out_channels[0]
if use_timestep_embedding:
lowercase_ = block_out_channels[0] * 4
lowercase_ = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
lowercase_ = nn.ModuleList([] )
lowercase_ = None
lowercase_ = nn.ModuleList([] )
lowercase_ = None
# down
lowercase_ = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
lowercase_ = output_channel
lowercase_ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase_ = i == len(lowerCamelCase__ ) - 1
lowercase_ = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
lowercase_ = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
lowercase_ = list(reversed(lowerCamelCase__ ) )
lowercase_ = reversed_block_out_channels[0]
if out_block_type is None:
lowercase_ = out_channels
else:
lowercase_ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
lowercase_ = output_channel
lowercase_ = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
lowercase_ = i == len(lowerCamelCase__ ) - 1
lowercase_ = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
lowercase_ = output_channel
# out
lowercase_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowercase_ = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
lowercase_ = timestep
if not torch.is_tensor(lowerCamelCase__ ):
lowercase_ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
lowercase_ = timesteps[None].to(sample.device )
lowercase_ = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
lowercase_ = self.time_mlp(lowerCamelCase__ )
else:
lowercase_ = timestep_embed[..., None]
lowercase_ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase_ = ()
for downsample_block in self.down_blocks:
lowercase_ = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase_ = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase_ = down_block_res_samples[-1:]
lowercase_ = down_block_res_samples[:-1]
lowercase_ = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
lowercase_ = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase__ )
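# Hedged smoke-test sketch for the 1D UNet above. The class name is anonymized
# in this dump, so the call below is illustrative pseudocode; shapes follow the
# forward pass: a (batch, channels, length) sample in, a same-shaped .sample out.
#
#   unet = <the UNet class above>(sample_size=256, in_channels=14, out_channels=14)
#   sample = torch.randn(1, 14, 256)
#   timestep = torch.tensor([10])
#   out = unet(sample, timestep).sample   # shape (1, 14, 256)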
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        lowercase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        lowercase_ = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
        lowercase_ = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
        lowercase_ = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
        lowercase_ = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
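# Hedged end-user sketch matching the slow tests above (needs network access to
# pull the "BAAI/AltDiffusion" checkpoint the tests use, and realistically a GPU):
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")
#   pipe = pipe.to("cuda")
#   result = pipe(
#       prompt="A fantasy landscape, trending on artstation",
#       image=init_image,        # PIL image; H and W should be divisible by 8
#       strength=0.75,
#       guidance_scale=7.5,
#   )
#   result.images[0].save("fantasy_landscape.png")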
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase=-1 ) -> Any:
'''simple docstring'''
lowercase_ = label_idx
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
lowercase_ = mode.value
lowercase_ = os.path.join(lowercase__ , F'{mode}.txt' )
lowercase_ = 1
lowercase_ = []
with open(lowercase__ , encoding="utf-8" ) as f:
lowercase_ = []
lowercase_ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
lowercase_ = []
lowercase_ = []
else:
lowercase_ = line.split(" " )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
return examples
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowercase_ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(lowercase__ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for \'%s\'." , line.split()[0] )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
if path:
with open(lowercase__ , "r" ) as f:
lowercase_ = f.read().splitlines()
if "O" not in labels:
lowercase_ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
super().__init__(label_idx=-2 )
def A__ ( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
if path:
with open(lowercase__ , "r" ) as f:
lowercase_ = f.read().splitlines()
if "O" not in labels:
lowercase_ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
lowercase_ = mode.value
lowercase_ = os.path.join(lowercase__ , F'{mode}.txt' )
lowercase_ = 1
lowercase_ = []
with open(lowercase__ , encoding="utf-8" ) as f:
for sentence in parse_incr(lowercase__ ):
lowercase_ = []
lowercase_ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = 0
for sentence in parse_incr(lowercase__ ):
lowercase_ = preds_list[example_id]
lowercase_ = ""
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase__ )
example_id += 1
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase__ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
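# Hedged, self-contained illustration of the CoNLL-style parsing loop in the
# first reader class above. This hypothetical helper mirrors the sentence
# splitting on an in-memory list of lines and returns (words, labels) pairs
# instead of InputExample objects.
def _parse_conll_demo(lines, label_idx=-1):
    sentences, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:  # a blank line (or doc marker) closes the current sentence
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            labels.append(splits[label_idx].rstrip("\n") if len(splits) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences

assert _parse_conll_demo(["EU B-ORG\n", "rejects O\n", "\n", "Peter B-PER\n"]) == [
    (["EU", "rejects"], ["B-ORG", "O"]),
    (["Peter"], ["B-PER"]),
]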
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
        (
            lowercase_,
            lowercase_,
            lowercase_,
            lowercase_,
            lowercase_,
            lowercase_,
            lowercase_,
        ) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
        lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.float32 )  # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
        lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.float32 ).reshape(1 , 1 , 1 )
lowercase_ = state
        lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.float32 )
        lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.float32 )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
            torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.float32 ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
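# Hedged shape reference for the rollout above (batch size 1 throughout):
#   states:        (1, t, state_dim)   history of observations
#   actions:       (1, t, act_dim)     past actions, zero-padded at the slot to fill
#   returns_to_go: (1, t, 1)           remaining target return, decremented by each reward
#   timesteps:     (1, t)              integer step indices
# Each step appends one zero action/reward, runs the model, and takes
# action_preds[0, -1] as the action for the newest timestep.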
from functools import reduce
SCREAMING_SNAKE_CASE__ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda __lowerCamelCase , __lowerCamelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(_UpperCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
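# Hedged cross-check of the reduce-based solution above: a plain sliding-window
# product (the window of 13 digits replaced by 3 here so the example stays tiny
# and hand-checkable). The helper is hypothetical, for illustration only.
def _max_window_product(digits: str, width: int) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        prod = 1
        for ch in digits[i : i + width]:
            prod *= int(ch)
        best = max(best, prod)
    return best

assert _max_window_product("123456", 3) == 4 * 5 * 6  # window "456" wins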
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 0
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = Path(__UpperCamelCase ) / "preprocessor_config.json"
lowercase_ = Path(__UpperCamelCase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__UpperCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(__UpperCamelCase , "w" ) )
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = Path(__UpperCamelCase ) / "preprocessor_config.json"
lowercase_ = Path(__UpperCamelCase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__UpperCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(__UpperCamelCase , "w" ) )
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowercase_ = Path(__UpperCamelCase ) / "preprocessor_config.json"
lowercase_ = Path(__UpperCamelCase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__UpperCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(__UpperCamelCase , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase ).to_dict()
config_dict.pop("image_processor_type" )
lowercase_ = CLIPImageProcessor(**__UpperCamelCase )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
config.save_pretrained(__UpperCamelCase )
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase )
# make sure private variable is not incorrectly saved
lowercase_ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = Path(__UpperCamelCase ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__UpperCamelCase , "w" ) , )
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , "clip-base is not a local folder and is not a valid model identifier" ):
lowercase_ = AutoImageProcessor.from_pretrained("clip-base" )
def A__ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase , revision="aaaaaa" )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
lowercase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
lowercase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
lowercase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__UpperCamelCase )
lowercase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
try:
AutoConfig.register("custom" , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = Path(__UpperCamelCase ) / "preprocessor_config.json"
lowercase_ = Path(__UpperCamelCase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__UpperCamelCase , "w" ) , )
json.dump({"model_type": "clip"} , open(__UpperCamelCase , "w" ) )
lowercase_ = CustomImageProcessor.from_pretrained(__UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
lowercase_ = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self ) -> Any:
'''simple docstring'''
class __lowerCamelCase ( _lowercase ):
"""simple docstring"""
lowerCAmelCase__ = True
try:
AutoConfig.register("custom" , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local
lowercase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase_ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__UpperCamelCase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
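# Hedged summary of the register/auto-load round trip exercised above:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = CustomImageProcessor.from_pretrained(some_dir)
#   processor.save_pretrained(tmp_dir)
#   AutoImageProcessor.from_pretrained(tmp_dir)  # now resolves to CustomImageProcessor
#
# Registration is keyed by the config class, so the auto-API can route any
# directory whose config.json declares model_type "custom".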
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
        lowercase_ = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
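# Hedged sampling-loop sketch for the scheduler above (the class name is
# anonymized here; in diffusers this pattern matches IPNDMScheduler). The
# denoising model is your own network; the call order is what matters:
#
#   scheduler = <scheduler class above>(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample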
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCamelCase ( __lowercase ):
"""simple docstring"""
lowerCAmelCase__ = '''encoder-decoder'''
lowerCAmelCase__ = True
def __init__( self , **UpperCAmelCase ) -> str:
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowercase_ = kwargs.pop("encoder" )
lowercase_ = encoder_config.pop("model_type" )
lowercase_ = kwargs.pop("decoder" )
lowercase_ = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
lowercase_ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ = True
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> PretrainedConfig:
'''simple docstring'''
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
lowercase_ = True
lowercase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.encoder.to_dict()
lowercase_ = self.decoder.to_dict()
lowercase_ = self.__class__.model_type
return output
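# Hedged construction sketch for the composite config above, using real
# transformers configs (BertConfig exists; the composed class is anonymized
# in this dump but corresponds to EncoderDecoderConfig in transformers):
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   enc = BertConfig()
#   dec = BertConfig()   # will get is_decoder/add_cross_attention set
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   cfg.decoder.is_decoder, cfg.decoder.add_cross_attention  # -> (True, True)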
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , ):
'''simple docstring'''
lowercase_ = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
lowercase_ = 1 - (matter_density + radiation_density + dark_energy)
lowercase_ = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowercase_ = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
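    # Hedged worked example of the formula above: with matter_density=0.3,
    # dark_energy=0.7, no radiation and flat curvature, at redshift z=1 we get
    # E(z)^2 = 0.3 * 2**3 + 0.7 = 3.1, so H(z) = H0 * sqrt(3.1) ~ 1.76 * H0.
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=0,
            matter_density=0.3,
            dark_energy=0.7,
            redshift=1,
        )
    )  # ~ 68.3 * 1.7607 ~ 120.3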
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int=() , __lowerCamelCase: str=None , __lowerCamelCase: Dict="no" , __lowerCamelCase: Any="29500" ):
'''simple docstring'''
lowercase_ = False
lowercase_ = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
lowercase_ = True
elif "IPython" in sys.modules:
lowercase_ = """google.colab""" in str(sys.modules["IPython"].get_ipython() )
try:
lowercase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , __lowerCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
lowercase_ = 8
lowercase_ = PrepareForLaunch(__lowerCAmelCase , distributed_type="TPU" )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(__lowerCAmelCase , args=__lowerCAmelCase , nprocs=__lowerCAmelCase , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__lowerCAmelCase )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowerCAmelCase , master_addr="127.0.01" , master_port=__lowerCAmelCase , mixed_precision=__lowerCAmelCase ):
lowercase_ = PrepareForLaunch(__lowerCAmelCase , distributed_type="MULTI_GPU" )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(__lowerCAmelCase , args=__lowerCAmelCase , nprocs=__lowerCAmelCase , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowercase_ = """1"""
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: Union[str, Any]=() , __lowerCamelCase: Any=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=__lowerCAmelCase , master_addr="127.0.0.1" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
lowercase_ = PrepareForLaunch(__lowerCAmelCase , debug=__lowerCAmelCase )
start_processes(__lowerCAmelCase , args=__lowerCAmelCase , nprocs=__lowerCAmelCase , start_method="fork" )
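# Hedged notebook usage sketch for the launcher above (this corresponds to
# accelerate's public notebook_launcher; per the checks in the function, the
# training function must create its own Accelerator internally):
#
#   def training_function(learning_rate):
#       accelerator = Accelerator()
#       ...  # build model/dataloaders, accelerator.prepare(...), train
#
#   notebook_launcher(training_function, args=(3e-4,), num_processes=2)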
import sys
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = len(__lowerCamelCase )
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
for chain_length in range(2 , __lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
lowercase_ = a + chain_length - 1
lowercase_ = sys.maxsize
for c in range(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase_ = cost
lowercase_ = c
return matrix, sol
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ):
'''simple docstring'''
if i == j:
print("A" + str(__lowerCamelCase ) , end=" " )
else:
print("(" , end=" " )
        print_optimal_solution(__lowerCamelCase , __lowerCamelCase , optimal_solution[i][j] )
        print_optimal_solution(__lowerCamelCase , optimal_solution[i][j] + 1 , __lowerCamelCase )
print(")" , end=" " )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = [30, 35, 15, 5, 10, 20, 25]
lowercase_ = len(__lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase_ , lowercase_ = matrix_chain_order(__lowerCamelCase )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optimal_solution(__lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
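# Hedged note on the DP above: matrix[a][b] holds the minimum number of scalar
# multiplications needed to compute the chain A_a ... A_b, via the recurrence
#   matrix[a][b] = min over c in [a, b) of
#                  matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# For the demo dimensions [30, 35, 15, 5, 10, 20, 25] the classic optimum is
# 15125 multiplications, i.e. main() prints "No. of Operation required: 15125".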
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __lowerCamelCase ( _A):
"""simple docstring"""
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.int64() )
def A__ ( self ) -> int:
'''simple docstring'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowercase_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def A__ ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowercase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowercase_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def A__ ( self ) -> str:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowercase_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def A__ ( self ) -> str:
'''simple docstring'''
import PIL.Image
lowercase_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=__SCREAMING_SNAKE_CASE ) as mock_cast_to_python_objects:
lowercase_ = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
lowercase_ , lowercase_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , __SCREAMING_SNAKE_CASE )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output( output , expected_num_chunks ):
    '''simple docstring'''
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str , __lowerCamelCase: str ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
lowercase_ = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowercase_ , lowercase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    '''simple docstring'''
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({"labels": 0} )
        writer.write({"labels": 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="split_name" , check_duplicates=lowercase__ , ) as writer:
with pytest.raises(lowercase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
lowercase_ , lowercase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="split_name" , check_duplicates=lowercase__ , ) as writer:
with pytest.raises(lowercase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
lowercase_ , lowercase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="split_name" , check_duplicates=lowercase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
lowercase_ , lowercase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
lowercase_ = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
lowercase_ , lowercase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[Any] ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
lowercase_ = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
lowercase_ , lowercase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ):
'''simple docstring'''
lowercase_ = pa.BufferOutputStream()
lowercase_ = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
lowercase_ , lowercase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowercase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir , "test.arrow" )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
            num_examples , num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype( arr_type ):
    '''simple docstring'''
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list( lst , value ):
    '''simple docstring'''
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = pa.array(TypedSequence(lowercase__ , optimized_int_type=lowercase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype" , [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence( sequence , col , expected_dtype ):
    '''simple docstring'''
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , value )
        arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: Tuple ):
'''simple docstring'''
lowercase_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=lowercase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem( mockfs ):
    '''simple docstring'''
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def test_parquet_writer_write():
    '''simple docstring'''
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def test_writer_embed_local_files( tmp_path , embed_local_files ):
    '''simple docstring'''
    import PIL.Image
    image_path = str(tmp_path / "test_image_rgb.jpg" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(image_path , format="png" )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output , features=Features({"image": Image()} ) , embed_local_files=embed_local_files ) as writer:
        writer.write({"image": image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"] , str )
        with open(image_path , "rb" ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    '''simple docstring'''
    non_nullable_schema = pa.schema([pa.field("col_1" , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema )
    assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
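# --- Added example (hedged): a minimal sketch of the ArrowWriter round trip the tests
# above exercise, kept as an uncalled helper so importing this module stays side-effect free.
def _arrow_writer_roundtrip_demo():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples , num_bytes = writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue() ) ).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}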
| 363
|
def equation( x: float ):
    '''simple docstring'''
    return 10 - x * x
def bisection( a: float , b: float ):
    '''simple docstring'''
    # Bisection only works when the endpoints bracket a sign change
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
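# --- Added example (hedged): sqrt(10) ~ 3.162 is the positive root of 10 - x*x, so
# bisection(0, 6) should land close to it; the loose bound reflects the 0.01 stop width.
def _bisection_demo():
    root = bisection(0 , 6 )
    assert abs(equation(root ) ) < 0.5
    print(f"approximate root: {root:.3f}" )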
| 297
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '▁'
SCREAMING_SNAKE_CASE__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
SCREAMING_SNAKE_CASE__ = {
'google/pegasus-xsum': 5_1_2,
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PegasusTokenizer
lowerCAmelCase__ = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ) -> Any:
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F'additional_special_tokens should be of type {type(list )}, but is'
                    F' {type(additional_special_tokens )}' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F'<unk_{i}>' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ) -> Any:
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
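# --- Added example (hedged): assuming the class above behaves like transformers'
# PegasusTokenizerFast, this sketch shows the usual round trip; it downloads the
# `google/pegasus-xsum` files referenced in the pretrained maps above.
def _pegasus_tokenizer_demo():
    tokenizer = __lowerCamelCase.from_pretrained("google/pegasus-xsum" )
    encoded = tokenizer("PEGASUS is a summarization model." )
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"] ) )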
| 364
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def load_vocab_file( vocab_file ):
    '''simple docstring'''
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowerCamelCase ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        '''simple docstring'''
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ):
        '''simple docstring'''
        return len(self._id_to_token )
    def get_vocab( self ):
        '''simple docstring'''
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix ):
        '''simple docstring'''
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens = False ) -> int:
        '''simple docstring'''
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
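# --- Added example (hedged): a minimal sketch of using the tokenizer above; the vocab
# path is hypothetical and must point to a real ESM vocab.txt for this to run.
def _esm_tokenizer_demo(vocab_path="vocab.txt" ):
    tokenizer = __lowerCamelCase(vocab_file=vocab_path )
    print(tokenizer.get_vocab_size() )
    # wraps raw residue ids with <cls> ... <eos>
    print(tokenizer.build_inputs_with_special_tokens([5, 6, 7] ) )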
| 297
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __lowerCamelCase ( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , embedding_dim: int = 768 , ):
        '''simple docstring'''
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
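# --- Added example (hedged): scale() and unscale() above are inverses by construction,
# so a round trip should return the input; this sketch assumes the freshly initialized
# zero-mean / unit-std parameters.
def _normalizer_roundtrip_demo():
    normalizer = __lowerCamelCase(embedding_dim=768 )
    embeds = torch.randn(2 , 768 )
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds ) ) , embeds , atol=1e-6 )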
| 365
|
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 297
| 0
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self , batch_size , length ):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = None
lowercase_ = 20
lowercase_ = self._get_uniform_logits(batch_size=2 , length=_lowercase )
# tweak scores to not be uniform anymore
lowercase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase_ = jax.nn.softmax(_lowercase , axis=-1 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase_ = jax.nn.softmax(temp_dist_warper_sharper(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
lowercase_ = jax.nn.softmax(temp_dist_warper_smoother(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = None
lowercase_ = 10
lowercase_ = 2
# create ramp distribution
lowercase_ = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy()
lowercase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase_ = 5
lowercase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase_ = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, length) ).copy()
lowercase_ = top_k_warp_safety_check(_lowercase , _lowercase , cur_len=_lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = None
lowercase_ = 10
lowercase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
lowercase_ = np.exp(top_p_warp(_lowercase , _lowercase , cur_len=_lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowercase_ = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase_ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowercase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase_ = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = 20
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
# check that min length is applied at length 5
lowercase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowercase_ = 5
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = 15
lowercase_ = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = 20
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
# check that all scores are -inf except the bos_token_id score
lowercase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowercase_ = 1
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase_ = 3
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = 20
lowercase_ = 4
lowercase_ = 0
lowercase_ = 5
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowercase_ = 4
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase_ = 3
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = 4
lowercase_ = 10
lowercase_ = 15
lowercase_ = 2
lowercase_ = 1
lowercase_ = 15
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , _lowercase )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
lowercase_ = 10
# no processor list
lowercase_ = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
lowercase_ = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
lowercase_ = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
lowercase_ = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
lowercase_ = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
lowercase_ = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
# with processor list
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(_lowercase , _lowercase , cur_len=_lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 4
lowercase_ = 10
lowercase_ = 15
lowercase_ = 2
lowercase_ = 1
lowercase_ = 15
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , _lowercase )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(_lowercase , _lowercase )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
lowercase_ = 10
# no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
# with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 366
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( TestCase ):
    """simple docstring"""
    def _create_example_records( self ):
        '''simple docstring'''
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dataset( self ):
        '''simple docstring'''
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data )
    def test_create( self ):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent( self ):
        '''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self ): # checks what happens with missing columns
        '''simple docstring'''
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {"col_1": 1} )
        self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
    def test_variable_list_records( self ): # checks if the type can be inferred from the second record
        '''simple docstring'''
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records )
        self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
    def test_create_empty( self ):
        '''simple docstring'''
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
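# --- Added example (hedged): the everyday use of Dataset.from_list that the tests above
# exercise, kept as an uncalled helper.
def _from_list_demo():
    dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}] )
    print(dset.column_names )  # ['col_1', 'col_2']
    print(dset[0] )  # {'col_1': 3, 'col_2': 'a'}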
| 297
| 0
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello world! cécé herlolip"""
SCREAMING_SNAKE_CASE__ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints( path_to_checkpoints: str , pytorch_dump_folder_path: str ):
    '''simple docstring'''
    # NOTE: the boolean flags below follow the reference conversion setup; they are
    # restored from context here and may need adjusting for other checkpoints.
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device("cpu" ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu" ) )
    new_model.eval()
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-." )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-." )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
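# --- Added example (hedged): typical command-line invocation of the script above; the
# script filename and paths are placeholders, not tested values.
#
#   python convert_bertabs_original_pytorch_dump.py \
#       --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
#       --pytorch_dump_folder_path ./bertabs-converted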
| 367
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 297
| 0
|
def and_gate( input_a: int , input_b: int ):
    '''simple docstring'''
    return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate():
    '''simple docstring'''
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
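# --- Added example (hedged): the same truth table as above, generated instead of spelled
# out; this scales directly to gates with more inputs.
def _truth_table_demo():
    for input_a in (0, 1):
        for input_b in (0, 1):
            print(f"and_gate({input_a}, {input_b}) = {and_gate(input_a , input_b )}" )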
| 368
|
def perfect( number: int ):
    '''simple docstring'''
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297
| 0
|
from math import pi, sqrt
def gamma( num: float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma():
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE__ = 1.0
while num:
SCREAMING_SNAKE_CASE__ = float(input("""Gamma of: """))
print(f"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
| 369
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
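        # Worked example with the defaults above (illustrative): image_size=32 and
        # patch_size=2 give (32 // 2) ** 2 = 256 patches; len(depths) - 1 = 2
        # downsampling stages divide that by 4 ** 2, so expected_seq_len = 16, and
        # expected_dim = embed_dim * 2 ** 2 = 16 * 4 = 64.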
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
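        # e.g. (illustrative) with image_size 32 and patch_size 2 this pads to
        # 32 + 2 - (32 % 2) = 34, i.e. always at least one extra row/column.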
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
| 297
| 0
|
from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
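
# Sanity-load sketch (illustrative, not part of the original script): the saved
# artefacts round-trip through from_pretrained like any other checkpoint:
#
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   assert reloaded.num_parameters() == tiny_model.num_parameters()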
| 370
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
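
# Behavioural note (illustrative): with add_prefix_space=False (the default),
# "hello" and " hello" tokenize to different ids, which is why the guards above
# refuse is_split_into_words=True unless add_prefix_space=True was set when the
# tokenizer was constructed.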
| 297
| 0
|
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the Karras et al. (2022) VE scheduler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
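
# Minimal usage sketch (illustrative; the checkpoint id is a placeholder for any
# repo containing a UNet2DModel plus a KarrasVeScheduler config):
#
#   pipe = KarrasVePipeline.from_pretrained("path/to/karras-ve-checkpoint")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("sample.png")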
| 371
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Any="attention" ):
'''simple docstring'''
lowercase_ = lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowercase_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowercase_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowercase_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowercase_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
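# Shape intuition for the lookup above (illustrative sizes): a T5X attention kernel
# is stored as (d_model, num_layers, num_heads, head_dim); selecting layer i and
# collapsing the two head axes yields the (d_model, num_heads * head_dim) matrix a
# PyTorch state dict expects (the caller transposes afterwards), e.g.:
#
#   k = np.zeros((512, 8, 6, 64))              # (d_model, layers, heads, head_dim)
#   k_i = np.ascontiguousarray(k[:, 3, :, :])  # pick layer 3
#   assert k_i.reshape(512, 6 * 64).shape == (512, 384)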
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=False ):
'''simple docstring'''
if split_mlp_wi:
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowercase_ = (wi_a, wi_a)
else:
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowercase_ = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: dict , *, __lowerCamelCase: int , __lowerCamelCase: bool , __lowerCamelCase: bool = False ):
'''simple docstring'''
lowercase_ = traverse_util.flatten_dict(variables["target"] )
lowercase_ = {"/".join(__lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase_ = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __lowerCamelCase )
lowercase_ = collections.OrderedDict()
# Shared embeddings.
lowercase_ = old["token_embedder/embedding"]
# Encoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , __lowerCamelCase , "encoder" ).T
lowercase_ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "encoder" ).T
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "self_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (Cross Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "encoder_decoder_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 2 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T
lowercase_ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ = old["decoder/logits_dense/kernel"].T
return new
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: bool ):
'''simple docstring'''
lowercase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase_ = state_dict["shared.weight"]
return state_dict
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = checkpoints.load_tax_checkpoint(__lowerCamelCase )
lowercase_ = convert_tax_to_pytorch(
__lowerCamelCase , num_layers=config.num_layers , is_encoder_only=__lowerCamelCase , scalable_attention=__lowerCamelCase )
lowercase_ = make_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , ):
'''simple docstring'''
lowercase_ = MTaConfig.from_json_file(__lowerCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase_ = UMTaEncoderModel(__lowerCamelCase )
else:
lowercase_ = UMTaForConditionalGeneration(__lowerCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowerCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(__lowerCamelCase )
print("Done" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 297
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Project Euler 6: the difference between the square of the sum and the sum
    of the squares of the first n natural numbers, in closed form."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
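
# Worked check (illustrative): for n = 10 the square of the sum is 55 ** 2 = 3025
# and the sum of the squares is 385, so solution(10) == 3025 - 385 == 2640.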
| 350
|
def count_set_bits(number: int) -> int:
    """Count the 1-bits in the binary representation of a non-negative integer."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
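
# Illustrative examples (not in the original script): 13 is 0b1101, so it has
# three set bits, and any power of two has exactly one.
assert count_set_bits(13) == 3
assert all(count_set_bits(1 << k) == 1 for k in range(8))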
| 297
| 0
|
def is_balanced(s: str) -> bool:
    """Return True if every bracket in the string is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
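
# Illustrative checks (not part of the original script):
#
#   assert is_balanced("([]{})") is True    # properly nested
#   assert is_balanced("([)]") is False     # crossed pair
#   assert is_balanced("((") is False       # left unclosed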
| 351
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of the temporal transformer: the processed (residual-added) sample."""

    sample: torch.FloatTensor
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 16 , UpperCAmelCase = 88 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = True , UpperCAmelCase = True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase_ = num_attention_heads
lowercase_ = attention_head_dim
lowercase_ = num_attention_heads * attention_head_dim
lowercase_ = in_channels
lowercase_ = torch.nn.GroupNorm(num_groups=UpperCAmelCase , num_channels=UpperCAmelCase , eps=1e-6 , affine=UpperCAmelCase )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
# 3. Define transformers blocks
lowercase_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dropout=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , activation_fn=UpperCAmelCase , attention_bias=UpperCAmelCase , double_self_attention=UpperCAmelCase , norm_elementwise_affine=UpperCAmelCase , )
for d in range(UpperCAmelCase )
] )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = hidden_states.shape
lowercase_ = batch_frames // num_frames
lowercase_ = hidden_states
lowercase_ = hidden_states[None, :].reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase_ = self.norm(UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase , UpperCAmelCase )
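        # Shape walk-through (illustrative): input arrives as
        # (batch * frames, channels, height, width); the reshapes/permutes above
        # regroup it into (batch * height * width, frames, channels) so that each
        # spatial location attends over the temporal (frames) axis.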
lowercase_ = self.proj_in(UpperCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowercase_ = block(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , class_labels=UpperCAmelCase , )
# 3. Output
lowercase_ = self.proj_out(UpperCAmelCase )
lowercase_ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase_ = hidden_states.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase )
| 297
| 0
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Any=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: List[Any]=0 ):
'''simple docstring'''
lowercase_ = []
for old_item in old_list:
lowercase_ = old_item.replace("in_layers.0" , "norm1" )
lowercase_ = new_item.replace("in_layers.2" , "conv1" )
lowercase_ = new_item.replace("out_layers.0" , "norm2" )
lowercase_ = new_item.replace("out_layers.3" , "conv2" )
lowercase_ = new_item.replace("emb_layers.1" , "time_emb_proj" )
lowercase_ = new_item.replace("skip_connection" , "conv_shortcut" )
lowercase_ = shave_segments(UpperCamelCase__ , n_shave_prefix_segments=UpperCamelCase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=0 ):
'''simple docstring'''
lowercase_ = []
for old_item in old_list:
lowercase_ = old_item
lowercase_ = new_item.replace("norm.weight" , "group_norm.weight" )
lowercase_ = new_item.replace("norm.bias" , "group_norm.bias" )
lowercase_ = new_item.replace("proj_out.weight" , "proj_attn.weight" )
lowercase_ = new_item.replace("proj_out.bias" , "proj_attn.bias" )
lowercase_ = shave_segments(UpperCamelCase__ , n_shave_prefix_segments=UpperCamelCase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any]=None , __lowerCamelCase: List[str]=None , __lowerCamelCase: List[Any]=None ):
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase_ = old_checkpoint[path]
lowercase_ = old_tensor.shape[0] // 3
lowercase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase_ = old_tensor.shape[0] // config['''num_head_channels'''] // 3
lowercase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase_ = old_tensor.split(channels // num_heads , dim=1 )
lowercase_ = query.reshape(UpperCamelCase__ )
lowercase_ = key.reshape(UpperCamelCase__ )
lowercase_ = value.reshape(UpperCamelCase__ )
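            # Illustrative: the fused qkv tensor (leading dim 3 * channels) is viewed
            # per-head, split into equal query/key/value thirds along dim 1, and each
            # third is flattened back to (-1, channels) for the converted checkpoint.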
for path in paths:
lowercase_ = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
lowercase_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
lowercase_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase_ = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase_ = old_checkpoint[path['''old''']][:, :, 0]
else:
lowercase_ = old_checkpoint[path['''old''']]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: Tuple ):
'''simple docstring'''
lowercase_ = {}
lowercase_ = checkpoint['''time_embed.0.weight''']
lowercase_ = checkpoint['''time_embed.0.bias''']
lowercase_ = checkpoint['''time_embed.2.weight''']
lowercase_ = checkpoint['''time_embed.2.bias''']
lowercase_ = checkpoint['''input_blocks.0.0.weight''']
lowercase_ = checkpoint['''input_blocks.0.0.bias''']
lowercase_ = checkpoint['''out.0.weight''']
lowercase_ = checkpoint['''out.0.bias''']
lowercase_ = checkpoint['''out.2.weight''']
lowercase_ = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
lowercase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowercase_ = {
layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the middle blocks only
lowercase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowercase_ = {
layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the output blocks only
lowercase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowercase_ = {
layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
for i in range(1 , UpperCamelCase__ ):
lowercase_ = (i - 1) // (config['''num_res_blocks'''] + 1)
lowercase_ = (i - 1) % (config['''num_res_blocks'''] + 1)
lowercase_ = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key]
lowercase_ = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key]
if F'input_blocks.{i}.0.op.weight' in checkpoint:
lowercase_ = checkpoint[
F'input_blocks.{i}.0.op.weight'
]
lowercase_ = checkpoint[
F'input_blocks.{i}.0.op.bias'
]
continue
lowercase_ = renew_resnet_paths(UpperCamelCase__ )
lowercase_ = {'''old''': F'input_blocks.{i}.0', '''new''': F'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
lowercase_ = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path, resnet_op] , config=UpperCamelCase__ )
if len(UpperCamelCase__ ):
lowercase_ = renew_attention_paths(UpperCamelCase__ )
lowercase_ = {
'''old''': F'input_blocks.{i}.1',
'''new''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
lowercase_ = {
F'input_blocks.{i}.1.qkv.bias': {
'''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'input_blocks.{i}.1.qkv.weight': {
'''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=UpperCamelCase__ , config=UpperCamelCase__ , )
lowercase_ = middle_blocks[0]
lowercase_ = middle_blocks[1]
lowercase_ = middle_blocks[2]
lowercase_ = renew_resnet_paths(UpperCamelCase__ )
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ )
lowercase_ = renew_resnet_paths(UpperCamelCase__ )
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ )
lowercase_ = renew_attention_paths(UpperCamelCase__ )
lowercase_ = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , attention_paths_to_split=UpperCamelCase__ , config=UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
lowercase_ = i // (config['''num_res_blocks'''] + 1)
lowercase_ = i % (config['''num_res_blocks'''] + 1)
lowercase_ = [shave_segments(UpperCamelCase__ , 2 ) for name in output_blocks[i]]
lowercase_ = {}
for layer in output_block_layers:
lowercase_ = layer.split("." )[0], shave_segments(UpperCamelCase__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCamelCase__ )
else:
lowercase_ = [layer_name]
if len(UpperCamelCase__ ) > 1:
lowercase_ = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key]
lowercase_ = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key]
lowercase_ = renew_resnet_paths(UpperCamelCase__ )
lowercase_ = renew_resnet_paths(UpperCamelCase__ )
lowercase_ = {'''old''': F'output_blocks.{i}.0', '''new''': F'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowercase_ = checkpoint[
F'output_blocks.{i}.{index}.conv.weight'
]
lowercase_ = checkpoint[
F'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCamelCase__ ) == 2:
lowercase_ = []
if len(UpperCamelCase__ ):
lowercase_ = renew_attention_paths(UpperCamelCase__ )
lowercase_ = {
'''old''': F'output_blocks.{i}.1',
'''new''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
lowercase_ = {
F'output_blocks.{i}.1.qkv.bias': {
'''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
F'output_blocks.{i}.1.qkv.weight': {
'''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=UpperCamelCase__ , )
else:
lowercase_ = renew_resnet_paths(UpperCamelCase__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase_ = '''.'''.join(["output_blocks", str(UpperCamelCase__ ), path["old"]] )
lowercase_ = '''.'''.join(["up_blocks", str(UpperCamelCase__ ), "resnets", str(UpperCamelCase__ ), path["new"]] )
lowercase_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
        vqvae = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 352
|
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] for one sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
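
# Minimal usage sketch (illustrative; `IdentityFilter` is a stand-in for anything
# implementing the FilterType protocol, e.g. an IIR filter exposing process()):
#
#   class IdentityFilter:
#       def process(self, sample: float) -> float:
#           return sample
#
#   show_frequency_response(IdentityFilter(), 48000)  # a flat 0 dB response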
| 297
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 353
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
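
# Note (illustrative): like BART/RoBERTa, MVP does not use token type ids, so the
# method above always returns a zero vector sized to the full special-token layout.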
| 297
| 0
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = logging.get_logger()
# the current default level is logging.WARNING
lowercase_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(UpperCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = logging.get_verbosity()
lowercase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
lowercase_ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(UpperCAmelCase ) as cl:
logger.warning(UpperCAmelCase )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(UpperCAmelCase ) as cl:
logger.warning(UpperCAmelCase )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(UpperCAmelCase ) as cl:
logger.warning(UpperCAmelCase )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(UpperCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowercase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
lowercase_ = os.getenv("TRANSFORMERS_VERBOSITY" , UpperCAmelCase )
lowercase_ = logging.log_levels[env_level_str]
lowercase_ = logging.get_verbosity()
self.assertEqual(
UpperCAmelCase , UpperCAmelCase , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
lowercase_ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def A__ ( self ) -> int:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
lowercase_ = logging.logging.getLogger()
with CaptureLogger(UpperCAmelCase ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def A__ ( self ) -> int:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
lowercase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
lowercase_ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(UpperCAmelCase ) as cl:
logger.warning_advice(UpperCAmelCase )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(UpperCAmelCase ) as cl:
logger.warning_advice(UpperCAmelCase )
self.assertEqual(cl.out , msg + "\n" )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
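
# A minimal usage sketch of the verbosity API the tests above exercise
# (illustration only, not part of the original test module):
def _verbosity_demo():
    logger = logging.get_logger("transformers.demo")
    logging.set_verbosity_info()
    logger.info("visible at INFO level")  # emitted
    logging.set_verbosity_error()
    logger.warning("suppressed at ERROR level")  # swallowed
    # env-var equivalents covered by the tests above:
    #   TRANSFORMERS_VERBOSITY=error python my_script.py
    #   TRANSFORMERS_NO_ADVISORY_WARNINGS=1 python my_script.py  # silences warning_advice only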
| 354
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 297
| 0
|
SCREAMING_SNAKE_CASE__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
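
# Hedged usage sketch of the API re-exported above (comment-only illustration;
# the model/optimizer/dataloader objects are hypothetical, not defined here):
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       optimizer.zero_grad()
#       loss = model(batch)
#       accelerator.backward(loss)  # replaces loss.backward() to handle mixed precision / DDP
#       optimizer.step()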
| 355
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = 1.0 if scale is None else scale
lowercase_ = 0.0 if loc is None else loc
super().__init__(UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCAmelCase )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = args_dim
lowercase_ = nn.ModuleList([nn.Linear(UpperCAmelCase , UpperCAmelCase ) for dim in args_dim.values()] )
lowercase_ = domain_map
def A__ ( self , UpperCAmelCase ) -> Tuple[torch.Tensor]:
'''simple docstring'''
lowercase_ = [proj(UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase_ = function
def A__ ( self , UpperCAmelCase , *UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.function(UpperCAmelCase , *UpperCAmelCase )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
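
# Hedged sketch (illustration only): the squareplus map used by the output classes
# above, squareplus(x) = (x + sqrt(x**2 + 4)) / 2, sends any real network output to
# a strictly positive value, which is what turns raw features into valid scale / df /
# total_count parameters. The positive values can then parametrize an
# affine-transformed base distribution, mirroring the classes above:
def _squareplus_demo():
    x = torch.tensor([-5.0, 0.0, 5.0])
    y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
    assert bool((y > 0).all())  # squareplus is positive everywhere; squareplus(0) == 1
    base = Normal(loc=torch.zeros(3), scale=y)
    scaled = TransformedDistribution(base, [AffineTransform(loc=1.0, scale=2.0)])
    return scaled.sample()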
| 297
| 0
|
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = 'src/transformers'
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(R"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _SCREAMING_SNAKE_CASE: Optional[int] ):
'''simple docstring'''
if _re_test_backend.search(__UpperCAmelCase ) is None:
return None
lowercase_ = [b[0] for b in _re_backend.findall(__UpperCAmelCase )]
backends.sort()
return "_and_".join(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _SCREAMING_SNAKE_CASE: Any ):
'''simple docstring'''
with open(__UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase_ = f.readlines()
lowercase_ = 0
while line_index < len(__UpperCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase_ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowercase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCAmelCase ):
lowercase_ = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0]
            lowercase_ = re.findall(R"\[([^\]]+)\]" , __UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowercase_ = _re_import_struct_key_value.search(__UpperCAmelCase )
if single_line_import_search is not None:
lowercase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowercase_ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowercase_ = lines[line_index]
if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None:
lowercase_ = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(", " )
lowercase_ = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_between_brackets.search(__UpperCAmelCase ) is not None:
lowercase_ = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(", " )
lowercase_ = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_quote_object.search(__UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowercase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase_ = []
while (
line_index < len(__UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowercase_ = lines[line_index]
lowercase_ = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase_ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowercase_ = lines[line_index]
lowercase_ = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Dict ):
'''simple docstring'''
def find_duplicates(_SCREAMING_SNAKE_CASE: str ):
return [k for k, v in collections.Counter(__UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase_ = []
for key in import_dict_objects.keys():
lowercase_ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
lowercase_ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase_ = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
lowercase_ = os.path.join(__UpperCAmelCase , "__init__.py" )
lowercase_ = parse_init(__UpperCAmelCase )
if objects is not None:
lowercase_ = analyze_results(*__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowercase_ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
raise ValueError("\n\n".join(__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = []
for path, directories, files in os.walk(__UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCAmelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
lowercase_ = str((Path(__UpperCAmelCase ) / folder).relative_to(__UpperCAmelCase ) )
lowercase_ = short_path.replace(os.path.sep , "." )
submodules.append(__UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
lowercase_ = str((Path(__UpperCAmelCase ) / fname).relative_to(__UpperCAmelCase ) )
lowercase_ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__UpperCAmelCase )
return submodules
SCREAMING_SNAKE_CASE__ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(__UpperCAmelCase , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase_ = spec.loader.load_module()
lowercase_ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__UpperCAmelCase ) > 0:
lowercase_ = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
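
# For reference, a hedged sketch of the layout this script validates (illustrative
# module and object names, not an actual package file):
#
#   _import_structure = {"configuration_foo": ["FooConfig"]}
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       pass
#   else:
#       _import_structure["modeling_foo"] = ["FooModel"]
#
#   if TYPE_CHECKING:
#       from .configuration_foo import FooConfig
#       # ... the TYPE_CHECKING half must mirror _import_structure exactly,
#       # backend by backend, which is what the parsing and comparison above check.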
| 356
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = data
def __iter__( self ) -> List[str]:
'''simple docstring'''
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]=True ):
'''simple docstring'''
lowercase_ = Accelerator(even_batches=__lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: bool = False ):
'''simple docstring'''
if iterable:
lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
else:
lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
lowercase_ = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = accelerator.prepare(__lowerCamelCase )
return dl
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , ):
'''simple docstring'''
lowercase_ = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCamelCase ):
lowercase_ = ddp_model(batch[0].float() )
lowercase_ = output.sum()
loss.backward()
batch_idxs.append(__lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = train_dl.batch_sampler.even_batches
lowercase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
lowercase_ = accelerator.state.distributed_type
lowercase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
lowercase_ = original_state
if __name__ == "__main__":
main()
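
# Hedged, pure-Python illustration of the behaviour under test (no GPUs; the
# round-robin sharding below is a simplification of what the prepared sampler does):
# with 3 samples, batch_size 1 and 2 processes, even_batches=True pads by recycling
# sample 0 so both ranks see two batches; even_batches=False leaves rank 1 short.
def _even_batches_sketch(dataset_size=3, num_processes=2, even_batches=True):
    indices = list(range(dataset_size))
    if even_batches and dataset_size % num_processes:
        indices += indices[: num_processes - dataset_size % num_processes]
    return [indices[rank::num_processes] for rank in range(num_processes)]

assert _even_batches_sketch(even_batches=True) == [[0, 2], [1, 0]]
assert _even_batches_sketch(even_batches=False) == [[0, 2], [1]]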
| 297
| 0
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list , __lowerCamelCase: list ):
'''simple docstring'''
if len(lowercase_ ) != 2 or len(a[0] ) != 2 or len(lowercase_ ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
lowercase_ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list , __lowerCamelCase: list ):
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowercase_ ) )
]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list , __lowerCamelCase: list ):
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowercase_ ) )
]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list ):
'''simple docstring'''
if len(lowercase_ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
lowercase_ = len(lowercase_ )
lowercase_ = matrix_length // 2
lowercase_ = [[a[i][j] for j in range(lowercase_ , lowercase_ )] for i in range(lowercase_ )]
lowercase_ = [
[a[i][j] for j in range(lowercase_ , lowercase_ )] for i in range(lowercase_ , lowercase_ )
]
lowercase_ = [[a[i][j] for j in range(lowercase_ )] for i in range(lowercase_ )]
lowercase_ = [[a[i][j] for j in range(lowercase_ )] for i in range(lowercase_ , lowercase_ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list ):
'''simple docstring'''
return len(lowercase_ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list ):
'''simple docstring'''
print("\n".join(str(lowercase_ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list , __lowerCamelCase: list ):
'''simple docstring'''
if matrix_dimensions(lowercase_ ) == (2, 2):
return default_matrix_multiplication(lowercase_ , lowercase_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = split_matrix(lowercase_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = split_matrix(lowercase_ )
lowercase_ = actual_strassen(lowercase_ , matrix_subtraction(lowercase_ , lowercase_ ) )
lowercase_ = actual_strassen(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ )
lowercase_ = actual_strassen(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ )
lowercase_ = actual_strassen(lowercase_ , matrix_subtraction(lowercase_ , lowercase_ ) )
lowercase_ = actual_strassen(matrix_addition(lowercase_ , lowercase_ ) , matrix_addition(lowercase_ , lowercase_ ) )
lowercase_ = actual_strassen(matrix_subtraction(lowercase_ , lowercase_ ) , matrix_addition(lowercase_ , lowercase_ ) )
lowercase_ = actual_strassen(matrix_subtraction(lowercase_ , lowercase_ ) , matrix_addition(lowercase_ , lowercase_ ) )
lowercase_ = matrix_addition(matrix_subtraction(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ ) , lowercase_ )
lowercase_ = matrix_addition(lowercase_ , lowercase_ )
lowercase_ = matrix_addition(lowercase_ , lowercase_ )
lowercase_ = matrix_subtraction(matrix_subtraction(matrix_addition(lowercase_ , lowercase_ ) , lowercase_ ) , lowercase_ )
# construct the new matrix from our 4 quadrants
lowercase_ = []
for i in range(len(lowercase_ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowercase_ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
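
# For reference, the seven products computed recursively above follow the standard
# Strassen identities (quadrant names are implicit in the obfuscated variables):
#   t1 = A11 (B12 - B22)        t5 = (A11 + A22)(B11 + B22)
#   t2 = (A11 + A12) B22        t6 = (A12 - A22)(B21 + B22)
#   t3 = (A21 + A22) B11        t7 = (A11 - A21)(B11 + B12)
#   t4 = A22 (B21 - B11)
#   C11 = t5 + t4 - t2 + t6     C12 = t1 + t2
#   C21 = t3 + t4               C22 = t1 + t5 - t3 - t7
# One multiplication is saved per split (7 instead of 8), giving O(n**log2(7))
# ~ O(n**2.807) arithmetic instead of O(n**3).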
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list , __lowerCamelCase: list ):
'''simple docstring'''
if matrix_dimensions(lowercase_ )[1] != matrix_dimensions(lowercase_ )[0]:
lowercase_ = (
"Unable to multiply these matrices, please check the dimensions.\n"
F'Matrix A: {matrixa}\n'
F'Matrix B: {matrixa}'
)
raise Exception(lowercase_ )
lowercase_ = matrix_dimensions(lowercase_ )
lowercase_ = matrix_dimensions(lowercase_ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowercase_ = max(*lowercase_ , *lowercase_ )
lowercase_ = int(math.pow(2 , math.ceil(math.loga(lowercase_ ) ) ) )
lowercase_ = matrixa
lowercase_ = matrixa
    # Adding zeros to the matrices so that the array dimensions are the same and
    # also a power of 2
for i in range(0 , lowercase_ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowercase_ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowercase_ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowercase_ = actual_strassen(lowercase_ , lowercase_ )
# Removing the additional zeros
for i in range(0 , lowercase_ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowercase_ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
SCREAMING_SNAKE_CASE__ = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 357
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 297
| 0
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: str ):
'''simple docstring'''
lowercase_ = hf_hub_url(repo_id=A_ , path=A_ , revision=A_ )
assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(A_ )}'
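
# Concrete instance of the template asserted above (illustrative parameter values):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"
# quote() percent-encodes the path component and a None revision falls back to "main".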
| 358
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
        (
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
            lowercase_ ,
        ) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
lowerCAmelCase__ = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
    state, reward, done, _ = (  # env.step(action)
        torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
        1.0,
        False,
        {},
    )
    actions[-1] = action_pred[0, -1]
    states = torch.cat([states, state] , dim=1 )
    pred_return = returns_to_go[0, -1] - reward
    returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
    timesteps = torch.cat(
        [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
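# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test) of how the autoregressive
# loop above would be driven by a real environment instead of the random
# `env.step` stub. `gym` and the "Hopper-v3" id are illustrative assumptions;
# any environment whose observation size matches config.state_dim would do:
#
#   import gym
#   env = gym.make("Hopper-v3")
#   obs = env.reset()
#   states = torch.as_tensor(obs, dtype=torch.float32).reshape(1, 1, config.state_dim)
#   ...
#   obs, reward, done, info = env.step(action_pred[0, -1].cpu().numpy())
# ---------------------------------------------------------------------------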
| 297
| 0
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    '''simple docstring'''
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
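# Usage sketch (not part of the original script; file names are illustrative).
# `fire` maps positional and flag arguments onto the function signature, so
#
#   python rouge_cli.py preds.txt refs.txt --save_path metrics.json
#
# is equivalent to calling
#
#   calculate_rouge_path("preds.txt", "refs.txt", save_path="metrics.json")
#
# directly, with each file expected to hold one prediction/reference per line.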
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
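# A minimal sketch (not part of the original file) of what the `_LazyModule`
# indirection buys: symbols listed in `_import_structure` are only imported
# when first touched, so the torch-backed modeling module stays unloaded
# until something actually needs it. Module paths below are illustrative:
#
#   from transformers.models.mra import MraConfig   # cheap, config only
#   from transformers.models.mra import MraModel    # first touch triggers the
#                                                   # modeling_mra import (torch)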
| 297
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    '''simple docstring'''
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 360
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , num_train_timesteps: int = 1000 , trained_betas = None ) -> None:
    '''simple docstring'''
    self.set_timesteps(num_train_timesteps )
    # standard deviation of the initial noise distribution
    self.init_noise_sigma = 1.0
    # For now we only support F-PNDM, i.e. the runge-kutta method
    # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
    # mainly at formula (9), (12), (13) and the Algorithm 2.
    self.pndm_order = 4
    # running values
    self.ets = []
def set_timesteps( self , num_inference_steps: int , device = None ) -> None:
    '''simple docstring'''
    self.num_inference_steps = num_inference_steps
    steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
    steps = torch.cat([steps, torch.tensor([0.0] )] )
    if self.config.trained_betas is not None:
        self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
    else:
        self.betas = torch.sin(steps * math.pi / 2 ) ** 2
    self.alphas = (1.0 - self.betas**2) ** 0.5
    self.timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1].to(device )
    self.ets = []
def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , return_dict: bool = True , ) -> Union[SchedulerOutput, Tuple]:
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
    timestep_index = (self.timesteps == timestep).nonzero().item()
    prev_timestep_index = timestep_index + 1
    ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
    self.ets.append(ets )
    if len(self.ets ) == 1:
        ets = self.ets[-1]
    elif len(self.ets ) == 2:
        ets = (3 * self.ets[-1] - self.ets[-2]) / 2
    elif len(self.ets ) == 3:
        ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
    else:
        ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
    prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
    if not return_dict:
        return (prev_sample,)
    return SchedulerOutput(prev_sample=prev_sample )
def scale_model_input( self , sample: torch.FloatTensor , *args , **kwargs ) -> torch.FloatTensor:
    '''simple docstring'''
    return sample
def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ) -> torch.FloatTensor:
    '''simple docstring'''
    alpha = self.alphas[timestep_index]
    sigma = self.betas[timestep_index]
    next_alpha = self.alphas[prev_timestep_index]
    next_sigma = self.betas[prev_timestep_index]
    pred = (sample - sigma * ets) / max(alpha , 1e-8 )
    prev_sample = next_alpha * pred + ets * next_sigma
    return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
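# A minimal usage sketch (not part of the original file). In diffusers this
# scheduler is exported as IPNDMScheduler; `unet` stands in for any callable
# that maps (sample, t) to a model prediction and is an assumption for
# illustration:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample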
| 297
| 0
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    '''simple docstring'''
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success." )
if __name__ == "__main__":
test_rabin_karp()
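# Complexity note (not part of the original file): hashing the pattern and the
# first window costs O(p_len); each remaining window is rehashed in O(1) via
# the rolling hash, so the expected running time is O(t_len + p_len). The
# text[i : i + p_len] == pattern re-check guards against hash collisions.
# A small extra self-check on a fresh example:
if __name__ == "__main__":
    assert rabin_karp("needle", "haystack with a needle in it")
    assert not rabin_karp("needle", "haystack without one")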
| 361
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
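# A worked example (not part of the original file): evaluating the same flat
# universe at redshift z = 1 instead of z = 0 gives
# H(1) ≈ H0 * sqrt(1e-4 * 16 + 0.3 * 8 + 0.7) ≈ 1.76 * H0 ≈ 120.3 km/s/Mpc:
if __name__ == "__main__":
    h_z1 = hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=0.7,
        redshift=1,
    )
    print(f"H(z=1) = {h_z1:.1f} km/s/Mpc")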
| 297
| 0
|
def split(string: str, separator: str = " ") -> list:
    '''simple docstring'''
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
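# Example usage (not part of the original file); mirrors str.split for a
# single separator character:
if __name__ == "__main__":
    assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
    assert split("Hello there") == ["Hello", "there"]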
| 362
|
import sys
def matrix_chain_order(array):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution(optimal_solution, i, j):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
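# Complexity note (not part of the original file): the triple loop above is
# O(n^3) time and O(n^2) space in the number of matrices. A tiny check:
# multiplying a 2x3 by a 3x4 matrix (dimension array [2, 3, 4]) needs exactly
# 2 * 3 * 4 = 24 scalar multiplications:
if __name__ == "__main__":
    m, _ = matrix_chain_order([2, 3, 4])
    assert m[1][2] == 24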
| 297
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """simple docstring"""
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """simple docstring"""
    pass
class UnexpectedDownloadedFile(ChecksumVerificationException):
    """simple docstring"""
    pass
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """simple docstring"""
    pass
class NonMatchingChecksumError(ChecksumVerificationException):
    """simple docstring"""
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    '''simple docstring'''
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F'Checksums didn\'t match{for_verification_name}:\n'
            F'{bad_urls}\n'
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception):
    """simple docstring"""
    pass
class UnexpectedSplits(SplitsVerificationException):
    """simple docstring"""
    pass
class ExpectedMoreSplits(SplitsVerificationException):
    """simple docstring"""
    pass
class NonMatchingSplitsSizesError(SplitsVerificationException):
    """simple docstring"""
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    '''simple docstring'''
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict(path: str, record_checksum: bool = True):
    '''simple docstring'''
    if record_checksum:
        m = sha256()
        with open(path , "rb" ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , b"" ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size):
    '''simple docstring'''
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
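# A minimal usage sketch (not part of the original module); the path is
# illustrative:
#
#   info = get_size_checksum_dict("/tmp/data.bin")
#   # -> {'num_bytes': <size>, 'checksum': '<sha256 hex digest>'}
#   verify_checksums({"/tmp/data.bin": info}, {"/tmp/data.bin": info})
#   # logs "All the checksums matched successfully"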
| 363
|
def equation(x: float) -> float:
    '''simple docstring'''
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    '''simple docstring'''
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
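# The solver is generic in `equation`; a quick check (not part of the
# original file) against the known positive root of 10 - x**2 = 0, x = sqrt(10):
if __name__ == "__main__":
    import math
    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01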
| 297
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class __lowerCamelCase ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = MAX_MODEL_INPUT_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = []
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<unk>" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = None , **UpperCAmelCase , ) -> str:
'''simple docstring'''
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , pad_token=a__ , do_upper_case=a__ , do_lower_case=a__ , tgt_lang=a__ , lang_codes=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
lowercase_ = do_upper_case
lowercase_ = do_lower_case
lowercase_ = load_json(a__ )
lowercase_ = {v: k for k, v in self.encoder.items()}
lowercase_ = spm_file
lowercase_ = load_spm(a__ , self.sp_model_kwargs )
if lang_codes is not None:
lowercase_ = lang_codes
lowercase_ = LANGUAGES[lang_codes]
lowercase_ = [F'<lang:{lang}>' for lang in self.langs]
lowercase_ = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs}
lowercase_ = self.lang_tokens
lowercase_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowercase_ = {}
@property
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = new_tgt_lang
self.set_tgt_lang_special_tokens(a__ )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.lang_code_to_id[tgt_lang]
lowercase_ = [lang_code_id]
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(a__ , self.encoder[self.unk_token] )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.decoder.get(a__ , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = []
lowercase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowercase_ = self.sp_model.decode(a__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowercase_ = []
else:
current_sub_tokens.append(a__ )
lowercase_ = self.sp_model.decode(a__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> str:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> Any:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
lowercase_ = [1] * len(self.prefix_tokens )
lowercase_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a__ )) + suffix_ones
return prefix_ones + ([0] * len(a__ )) + ([0] * len(a__ )) + suffix_ones
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
'''simple docstring'''
lowercase_ = self.__dict__.copy()
lowercase_ = None
return state
def __setstate__( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase_ = {}
lowercase_ = load_spm(self.spm_file , self.sp_model_kwargs )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
'''simple docstring'''
lowercase_ = Path(a__ )
assert save_dir.is_dir(), F'{save_directory} should be a directory'
lowercase_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
lowercase_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(a__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a__ )
elif not os.path.isfile(self.spm_file ):
with open(a__ , "wb" ) as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (str(a__ ), str(a__ ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path: str) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path , "r" ) as f:
        return json.load(f )
def save_json(data, path: str) -> None:
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
| 364
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    '''simple docstring'''
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowerCamelCase ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
| 297
| 0
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append("passed" )
        except TimeoutException:
            result.append("timed out" )
        except BaseException as e:
            result.append(F'failed: {e}' )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    '''simple docstring'''
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!" )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io():
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir():
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException(Exception):
    """simple docstring"""
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""
    def read(self, *args, **kwargs):
        raise OSError
    def readline(self, *args, **kwargs):
        raise OSError
    def readlines(self, *args, **kwargs):
        raise OSError
    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    """simple docstring"""
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None):
    '''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
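# A minimal usage sketch (not part of the original file); the candidate
# program and ids are illustrative. Note that the child process disables
# most of os/shutil/subprocess before running the candidate:
#
#   program = "def add(a, b):\n    return a + b\n\nassert add(2, 2) == 4\n"
#   print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
#   # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}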
| 365
|
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
| 297
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    '''simple docstring'''
    with open(path , "rb" ) as f:
        im = Image.open(f )
    return im.convert("RGB" )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(default=_lowerCAmelCase , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase__ = field(default=_lowerCAmelCase , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase__ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def A__ ( self ) -> Tuple:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_lowerCAmelCase )} , )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase__ = field(default=_lowerCAmelCase , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase__ = field(
default=_lowerCAmelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def collate_fn(examples):
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
'''simple docstring'''
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , lowerCAmelCase__ , lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
    F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
    + F' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowercase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase_ = {}
if data_args.train_dir is not None:
lowercase_ = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
lowercase_ = os.path.join(data_args.validation_dir , "**" )
lowercase_ = load_dataset(
"imagefolder" , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase_ = dataset["""train"""].train_test_split(data_args.train_val_split )
lowercase_ = split["""train"""]
lowercase_ = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase_ = dataset["""train"""].features["""labels"""].names
lowercase_ = {}, {}
for i, label in enumerate(lowerCAmelCase__ ):
lowercase_ = str(lowerCAmelCase__ )
lowercase_ = label
# Load the accuracy metric from the datasets package
lowercase_ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCamelCase: Dict ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase_ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase__ ) , labelaid=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase_ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase_ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase_ = image_processor.size["""shortest_edge"""]
else:
lowercase_ = (image_processor.size["""height"""], image_processor.size["""width"""])
lowercase_ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase_ = Compose(
[
RandomResizedCrop(lowerCAmelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase_ = Compose(
[
Resize(lowerCAmelCase__ ),
CenterCrop(lowerCAmelCase__ ),
ToTensor(),
normalize,
] )
def train_transforms(__lowerCamelCase: List[str] ):
lowercase_ = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(__lowerCamelCase: Optional[Any] ):
lowercase_ = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowercase_ = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowerCAmelCase__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowercase_ = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowerCAmelCase__ )
# Initalize our trainer
lowercase_ = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , )
# Training
if training_args.do_train:
lowercase_ = None
if training_args.resume_from_checkpoint is not None:
lowercase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ = last_checkpoint
lowercase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase_ = trainer.evaluate()
trainer.log_metrics("eval" , lowerCAmelCase__ )
trainer.save_metrics("eval" , lowerCAmelCase__ )
# Write model card and (optionally) push to hub
lowercase_ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 366
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( TestCase ):
"""simple docstring"""
def _create_example_records(self) -> list:
    '''simple docstring'''
    return [
        {"col_1": 3, "col_2": "a"},
        {"col_1": 2, "col_2": "b"},
        {"col_1": 1, "col_2": "c"},
        {"col_1": 0, "col_2": "d"},
    ]
def _create_example_dict(self):
    '''simple docstring'''
    data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
    return Dataset.from_dict(data )
def test_create(self) -> None:
    '''simple docstring'''
    example_records = self._create_example_records()
    dset = Dataset.from_list(example_records )
    self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
    for i, r in enumerate(dset ):
        self.assertDictEqual(r , example_records[i] )
def test_list_dict_equivalent(self) -> None:
    '''simple docstring'''
    example_records = self._create_example_records()
    dset = Dataset.from_list(example_records )
    dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
    self.assertEqual(dset.info , dset_from_dict.info )
def test_uneven_records(self) -> None:  # checks what happens with missing columns
    '''simple docstring'''
    records = [{"col_1": 1}, {"col_2": "x"}]
    dset = Dataset.from_list(records )
    self.assertDictEqual(dset[0] , {"col_1": 1} )
    self.assertDictEqual(dset[1] , {"col_1": None} )  # NB: first record is used for columns
def test_variable_list_records(self) -> None:  # checks if the type can be inferred from the second record
    '''simple docstring'''
    records = [{"col_1": []}, {"col_1": [1, 2]}]
    dset = Dataset.from_list(records )
    self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def test_create_empty(self) -> None:
    '''simple docstring'''
    dset = Dataset.from_list([] )
    self.assertEqual(len(dset ) , 0 )
    self.assertListEqual(dset.column_names , [] )
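# For reference (not part of the original tests), the API under test in a
# nutshell:
#
#   from datasets import Dataset
#   ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#   ds.column_names   # ['col_1', 'col_2']
#   ds[0]             # {'col_1': 3, 'col_2': 'a'}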
| 297
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def setUp(self) -> None:
'''simple docstring'''
if self.framework == "pytorch":
    subprocess.run(
        F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="utf-8" , check=True , )
assert hasattr(self , "env" )
def create_estimator(self , instance_count=1 ):
    '''simple docstring'''
    return HuggingFace(
        entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'{self.env.base_job_name}-single' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def save_results_as_csv(self , job_name ):
    '''simple docstring'''
    TrainingJobAnalytics(job_name ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
def test_glue(self):
'''simple docstring'''
estimator = self.create_estimator()
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
    Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCamelCase__ )
| 367
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def tearDown(self) -> None:
    '''simple docstring'''
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
@property
def dummy_unet(self):
    '''simple docstring'''
    torch.manual_seed(0 )
    model = UNet2DModel(
        sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
    return model
@property
def dummy_unet_condition(self):
    '''simple docstring'''
    torch.manual_seed(0 )
    model = UNet2DConditionModel(
        sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
    return model
@property
def dummy_vqvae_and_unet(self):
    '''simple docstring'''
    torch.manual_seed(0 )
    vqvae = AutoencoderKL(
        sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
    unet = UNet2DModel(
        sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
    return vqvae, unet
@slow
def test_audio_diffusion(self):
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion( self ):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
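# Standalone usage sketch (illustrative, not part of the test suite; the
# checkpoint id is the one exercised by the integration test above):
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   out = pipe(generator=torch.Generator().manual_seed(0))
#   out.images[0].save("spectrogram.png")  # the mel spectrogram as an image
#   out.audios[0]                          # the corresponding waveform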
def gray_code_sequence(bit_count: int) -> list:
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
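# Minimal check of the Gray-code property guaranteed above: consecutive codes
# differ in exactly one bit.
def _is_gray_sequence(codes: list) -> bool:
    return all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))


# _is_gray_sequence(gray_code_sequence(3))  # True: [0, 1, 3, 2, 6, 7, 5, 4]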
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
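# Quick sanity check: the perfect numbers below 1000 are exactly 6, 28 and 496.
assert [n for n in range(1, 1_000) if perfect(n)] == [6, 28, 496]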
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 5_1_2,
"""squeezebert/squeezebert-mnli""": 5_1_2,
"""squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    r"""Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
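# Usage sketch (checkpoint id from the pretrained map above; illustrative):
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("first segment", "second segment")
#   enc["input_ids"]       # [CLS] ... [SEP] ... [SEP]
#   enc["token_type_ids"]  # 0s for the first segment, 1s for the second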
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )

    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def test_feed_forward_chunking( self ):
        pass

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )

    def test_hidden_states_output_with_padding( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )


@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
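# Inference sketch mirroring the integration test above (checkpoint id taken
# from the test; `image` is any PIL image):
#   processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
#   model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   pred = model(**inputs).logits.argmax(-1).item()
#   model.config.id2label[pred]  # 281 -> "tabby, tabby cat" for the COCO image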
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5

    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1e-5
    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1e-2
        assert abs(result_mean.item() - 0.3284743 ) < 1e-3

    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1e-2
        assert abs(result_mean.item() - 0.3362038 ) < 1e-3

    def test_trained_betas( self ):
        pass

    def test_add_noise_device( self ):
        pass
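# Minimal denoising-loop sketch mirroring test_full_loop above (`eps_model`
# stands in for any epsilon-prediction network; shapes are illustrative):
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       eps = eps_model(sample, t)
#       sample = scheduler.step(eps, t, sample).prev_sample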
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    r"""Construct a "fast" GPT-2 tokenizer (byte-level BPE), backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("add_bos_token" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """Builds the input ids for a conversation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
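# Usage sketch (model ids from the pretrained map above; illustrative):
#   tok = GPTaTokenizerFast.from_pretrained("gpt2")
#   tok("hello world")["input_ids"]
#   # Pretokenized input requires add_prefix_space=True:
#   tok_ws = GPTaTokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok_ws(["hello", "world"], is_split_into_words=True)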
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )


@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        device = "cpu"
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=1_4,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    """Returns the layer norm param of a layer."""
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch( variables: dict , * , num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k, o, q, v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[F'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[F'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi, wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[F'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[F'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k, o, q, v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[F'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k, o, q, v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[F'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi, wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[F'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[F'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(old , i , "decoder" ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict( converted_params , is_encoder_only: bool ):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
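# Example invocation (script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output \
#     --scalable_attention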
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results( output_dir ):
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'can\'t find {path}' )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests( TestCasePlus ):
    def test_run_glue_with_xla_spawn( self ):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n    ./examples/pytorch/text-classification/run_glue.py\n    --num_cores=8\n    ./examples/pytorch/text-classification/run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --do_train\n    --do_eval\n    --debug tpu_metrics_debug\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --max_steps=10\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    '.split()
        with patch.object(sys , "argv" , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500 )

    def test_trainer_tpu( self ):
        import xla_spawn

        testargs = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(sys , "argv" , testargs ):
            xla_spawn.main()
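# xla_spawn.py fans a training script out over the TPU cores; outside the test
# harness the equivalent command line would be (paths illustrative):
#   python examples/pytorch/xla_spawn.py --num_cores 8 \
#     examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased ...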
def get_set_bits_count(number: int) -> int:
    """Counts the set (1) bits in the binary representation of `number`."""
    if number < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(number , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(number ).count("1" )
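# Example: get_set_bits_count(25) == 3, since bin(25) == "0b11001".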
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features , target ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    sample: torch.FloatTensor


class TransformerTemporalModel( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , channel , num_frames )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
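# Shape walk-through (illustrative): with num_frames=8, an input of shape
# (batch * 8, C, H, W) is regrouped to (batch * H * W, 8, C) so attention runs
# along the temporal axis, then restored and added to the residual:
#   model = TransformerTemporalModel(num_attention_heads=4, attention_head_dim=8, in_channels=32)
#   x = torch.randn(2 * 8, 32, 16, 16)   # 2 clips of 8 frames each
#   out = model(x, num_frames=8).sample  # same shape as x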
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int = 1001 ):
'''simple docstring'''
lowercase_ = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowercase_ = 2 * i + 1
lowercase_ = 2 * i
lowercase_ = total + 4 * odd**2 - 6 * even
return total
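# Example: solution(5) == 101. The diagonals of a 5x5 number spiral are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.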
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number""")
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    def process( self , sample: float ) -> float:
        """Calculate y[n]."""
        return 0.0


def get_bounds( fft_results: np.ndarray , samplerate: int ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type: FilterType , samplerate: int ) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type: FilterType , samplerate: int ) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
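# A trivial pass-through filter satisfying the FilterType protocol
# (illustration only): its impulse response is a unit impulse, so the
# plotted magnitude response is a flat 0 dB line.
class Passthrough:
    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(Passthrough(), 48000)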
def kth_permutation(k: int , n: int ) -> list:
    """Returns the k-th (zero-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
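# Example: kth_permutation(10, 4) == [1, 3, 0, 2]: the permutations of
# range(4) in lexicographic order place 1302 at zero-based index 10.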
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class MvpTokenizerFast( PreTrainedTokenizerFast ):
    r"""Construct a "fast" MVP tokenizer (byte-level BPE), backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
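# Illustrative sketch (not part of the original file): minimal usage of the fast
# MVP tokenizer defined above (its public name in `transformers` is
# MvpTokenizerFast). Assumes the "RUCAIBox/mvp" checkpoint is reachable; the
# sample sentences are arbitrary.
from transformers import MvpTokenizerFast

mvp_tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
encoded_pair = mvp_tokenizer("Summarize:", "The weather is nice today.")
# Pairs come out as <s> A </s> </s> B </s>, matching the special-token logic above.
print(encoded_pair["input_ids"])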
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowerCamelCase ( lowercase_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = params
lowercase_ = np.array(a__ )
lowercase_ = np.array([len(a__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return len(self.lengths )
def A__ ( self ) -> str:
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.params.max_model_input_size
lowercase_ = self.lengths > max_len
logger.info(F'Splitting {sum(a__ )} too long sequences.' )
def divide_chunks(UpperCAmelCase , UpperCAmelCase ):
return [l[i : i + n] for i in range(0 , len(a__ ) , a__ )]
lowercase_ = []
lowercase_ = []
if self.params.mlm:
lowercase_ , lowercase_ = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
lowercase_ , lowercase_ = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowercase_ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowercase_ = np.insert(a__ , 0 , a__ )
if sub_s[-1] != sep_id:
lowercase_ = np.insert(a__ , len(a__ ) , a__ )
assert len(a__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a__ )
new_tok_ids.extend(a__ )
new_lengths.extend([len(a__ ) for l in sub_seqs] )
lowercase_ = np.array(a__ )
lowercase_ = np.array(a__ )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = len(self )
lowercase_ = self.lengths > 11
lowercase_ = self.token_ids[indices]
lowercase_ = self.lengths[indices]
lowercase_ = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def A__ ( self ) -> Any:
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowercase_ = self.params.special_tok_ids["unk_token"]
lowercase_ = len(self )
lowercase_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowercase_ = (unk_occs / self.lengths) < 0.5
lowercase_ = self.token_ids[indices]
lowercase_ = self.lengths[indices]
lowercase_ = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def A__ ( self ) -> Any:
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def A__ ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = [t[0] for t in batch]
lowercase_ = [t[1] for t in batch]
assert len(a__ ) == len(a__ )
# Max for paddings
lowercase_ = max(a__ )
# Pad token ids
if self.params.mlm:
lowercase_ = self.params.special_tok_ids["pad_token"]
else:
lowercase_ = self.params.special_tok_ids["unk_token"]
lowercase_ = [list(t.astype(a__ ) ) + [pad_idx] * (max_seq_len_ - len(a__ )) for t in token_ids]
assert len(tk_ ) == len(a__ )
assert all(len(a__ ) == max_seq_len_ for t in tk_ )
lowercase_ = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowercase_ = torch.tensor(a__ ) # (bs)
return tk_t, lg_t
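# Illustrative sketch (not part of the original file): the right-padding that the
# batch-collation method above performs, replayed on toy data with a made-up
# pad index.
import torch

toy_sequences = [torch.tensor([5, 6, 7]), torch.tensor([5, 6])]
pad_idx = 0
max_seq_len = max(len(t) for t in toy_sequences)
padded = [t.tolist() + [pad_idx] * (max_seq_len - len(t)) for t in toy_sequences]
print(torch.tensor(padded))  # tensor([[5, 6, 7], [5, 6, 0]]), shape (bs, max_seq_len)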
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
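# Illustrative sketch (not part of the original file): the two memory savers the
# slow tests above rely on, shown in isolation. The public class name is
# StableUnCLIPImg2ImgPipeline; the checkpoint id and fp16 dtype mirror the
# tests, and running this requires a CUDA GPU and network access.
import torch
from diffusers import StableUnCLIPImg2ImgPipeline

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
pipe.enable_attention_slicing()       # compute attention in slices to cut peak memory
pipe.enable_sequential_cpu_offload()  # move submodules to the GPU only while they run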
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = BigBirdConfig.from_json_file(__lowerCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
lowercase_ = BigBirdForQuestionAnswering(__lowerCamelCase )
else:
lowercase_ = BigBirdForPreTraining(__lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__lowerCamelCase , __lowerCamelCase , is_trivia_qa=__lowerCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
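# Illustrative sketch (not part of the original file): how the converter above is
# typically invoked from a shell. The script filename and all paths are
# placeholders.
#
#   python convert_bigbird_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --is_trivia_qa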
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = 1.0 if scale is None else scale
lowercase_ = 0.0 if loc is None else loc
super().__init__(UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCAmelCase )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = args_dim
lowercase_ = nn.ModuleList([nn.Linear(UpperCAmelCase , UpperCAmelCase ) for dim in args_dim.values()] )
lowercase_ = domain_map
def A__ ( self , UpperCAmelCase ) -> Tuple[torch.Tensor]:
'''simple docstring'''
lowercase_ = [proj(UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase_ = function
def A__ ( self , UpperCAmelCase , *UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.function(UpperCAmelCase , *UpperCAmelCase )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
import baseaa
def SCREAMING_SNAKE_CASE_ ( _SCREAMING_SNAKE_CASE: str ):
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8" ) )
def SCREAMING_SNAKE_CASE_ ( _SCREAMING_SNAKE_CASE: bytes ):
'''simple docstring'''
return baseaa.baadecode(lowerCamelCase_ ).decode("utf-8" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = 'Hello World!'
SCREAMING_SNAKE_CASE__ = baseaa_encode(test)
print(encoded)
SCREAMING_SNAKE_CASE__ = baseaa_decode(encoded)
print(decoded)
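# Illustrative sketch (not part of the original file): the un-obfuscated
# equivalent of the helpers above, using the standard-library base64 module
# (the identifiers above map to Base85 encode/decode).
import base64

def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))

def base85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")

assert base85_decode(base85_encode("Hello World!")) == "Hello World!"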
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = data
def __iter__( self ) -> List[str]:
'''simple docstring'''
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]=True ):
'''simple docstring'''
lowercase_ = Accelerator(even_batches=__lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: bool = False ):
'''simple docstring'''
if iterable:
lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
else:
lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
lowercase_ = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = accelerator.prepare(__lowerCamelCase )
return dl
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , ):
'''simple docstring'''
lowercase_ = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCamelCase ):
lowercase_ = ddp_model(batch[0].float() )
lowercase_ = output.sum()
loss.backward()
batch_idxs.append(__lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = train_dl.batch_sampler.even_batches
lowercase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
lowercase_ = accelerator.state.distributed_type
lowercase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
lowercase_ = original_state
if __name__ == "__main__":
main()
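# Illustrative sketch (not part of the original file): the core pattern the
# tests above exercise, assuming a recent `accelerate`. With even_batches=False,
# processes can end up with different batch counts, and join_uneven_inputs keeps
# DDP gradient sync from deadlocking. It is only meaningful when launched with
# `accelerate launch` on 2+ processes.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)
model = accelerator.prepare(torch.nn.Linear(1, 1))
loader = accelerator.prepare(
    DataLoader(TensorDataset(torch.arange(3.0).reshape(-1, 1)), batch_size=1)
)
with accelerator.join_uneven_inputs([model]):
    for (batch,) in loader:
        model(batch).sum().backward()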
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: type , __lowerCamelCase: Optional[str] , __lowerCamelCase: Optional[List[str]] = None , ):
'''simple docstring'''
lowercase_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
lowercase_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
lowercase_ = format_type
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Exception , __lowerCamelCase: Optional[str] , __lowerCamelCase: Optional[List[str]] = None ):
'''simple docstring'''
lowercase_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
lowercase_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
SCREAMING_SNAKE_CASE__ = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
SCREAMING_SNAKE_CASE__ = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
SCREAMING_SNAKE_CASE__ = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[str] , **__lowerCamelCase: Dict ):
'''simple docstring'''
lowercase_ = get_format_type_from_alias(UpperCAmelCase_ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase_ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            F'Return type should be None or selected in {[t for t in _FORMAT_TYPES.keys() if t is not None]}, but got \'{format_type}\'' )
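# Illustrative sketch (not part of the original file): resolving a formatter
# through the alias table built above. Requires `datasets` (and numpy) to be
# installed; the functions are defined in this very module.
from datasets.formatting import get_format_type_from_alias, get_formatter

assert get_format_type_from_alias("np") == "numpy"
numpy_formatter = get_formatter("np")  # a NumpyFormatter instance
print(type(numpy_formatter).__name__)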
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare with the max absolute error here
assert np.abs(expected_image - image ).max() < 1e-2
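# Illustrative sketch (not part of the original file): the fp16 casting pattern
# used by the "This test requires a GPU" case above, in isolation. Calling
# .half() on each submodule before assembling the pipeline keeps the weights in
# float16 end to end.
import torch

module = torch.nn.Linear(4, 4).half()
assert next(module.parameters()).dtype == torch.float16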
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = ["image_processor", "tokenizer"]
lowerCAmelCase__ = "ViTImageProcessor"
lowerCAmelCase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __snake_case , )
lowercase_ = kwargs.pop("feature_extractor" )
lowercase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__snake_case , __snake_case )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowercase_ = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if visual_prompt is not None:
lowercase_ = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
lowercase_ = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if visual_prompt is not None and images is not None:
lowercase_ = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowercase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowercase_ = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __snake_case , )
return self.image_processor_class
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __snake_case , )
return self.image_processor
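# Illustrative sketch (not part of the original file): calling a processor of
# this shape. "CIDAS/clipseg-rd64-refined" is the public CLIPSeg checkpoint that
# matches this ViTImageProcessor + CLIPTokenizer pairing; treat the checkpoint
# choice as an assumption.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)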
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
lowerCAmelCase__ = False
    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
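# Illustrative sketch (not part of the original file): a single forward pass of
# the shape the integration test above repeats. All tensors are random
# stand-ins; the dims follow the config arguments used in the tester above.
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

cfg = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=64, max_length=11)
model = DecisionTransformerModel(cfg).eval()
bs, seq = 1, 3
out = model(
    states=torch.randn(bs, seq, cfg.state_dim),
    actions=torch.randn(bs, seq, cfg.act_dim),
    rewards=torch.randn(bs, seq, 1),
    returns_to_go=torch.randn(bs, seq, 1),
    timesteps=torch.arange(seq).unsqueeze(0),
    attention_mask=torch.ones(bs, seq, dtype=torch.long),
)
print(out.action_preds.shape)  # torch.Size([1, 3, 6])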
from pathlib import Path
import fire
from tqdm import tqdm
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]="ro" , __lowerCamelCase: Optional[Any]="en" , __lowerCamelCase: List[str]="wmt16" , __lowerCamelCase: Tuple=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase_ = F'{src_lang}-{tgt_lang}'
print(F'Converting {dataset}-{pair}' )
lowercase_ = datasets.load_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
if save_dir is None:
lowercase_ = F'{dataset}-{pair}'
lowercase_ = Path(lowerCAmelCase__ )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
for split in ds.keys():
print(F'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
lowercase_ = "val" if split == "validation" else split
lowercase_ = save_dir.joinpath(F'{fn}.source' )
lowercase_ = save_dir.joinpath(F'{fn}.target' )
lowercase_ = src_path.open("w+" )
lowercase_ = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase_ = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
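# Illustrative sketch (not part of the original file): a typical invocation of
# the Fire CLI above; the script filename and save_dir are placeholders.
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en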
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
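# Illustrative sketch (not part of the original file): the effect of the
# _LazyModule wiring above — `import transformers` stays cheap, and the heavy
# modeling module is imported only when an MRA symbol is first touched. Assumes
# a recent `transformers` release that ships MRA.
from transformers import MraConfig  # resolved lazily on first attribute access

print(MraConfig().model_type)  # "mra"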
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCamelCase ( PretrainedConfig ):
"""simple docstring"""
lowerCAmelCase__ = """bert"""
def __init__( self , UpperCAmelCase=30522 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
'''simple docstring'''
        super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = classifier_dropout
class __lowerCamelCase ( OnnxConfig ):
"""simple docstring"""
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
if self.task == "multiple-choice":
lowercase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
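# A minimal sketch of what the property above produces (assuming it corresponds
# to `OnnxConfig.inputs`): for any task other than multiple-choice, each input
# gets batch/sequence dynamic axes, e.g.
#
#   OrderedDict([
#       ("input_ids", {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#       ("token_type_ids", {0: "batch", 1: "sequence"}),
#   ])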
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
        lowercase_ = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
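# A minimal denoising-loop sketch for the F-PNDM/IPNDM-style scheduler above
# (hypothetical names: `model` stands in for any noise-predicting UNet; the
# scheduler class name is whatever this file exports):
#
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample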
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=18 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=[0.48145466, 0.4578275, 0.40821073] , UpperCAmelCase=[0.26862954, 0.26130258, 0.27577711] , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = size if size is not None else {'''height''': 224, '''width''': 224}
lowercase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = min_resolution
lowercase_ = max_resolution
lowercase_ = do_resize
lowercase_ = size
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_normalize
lowercase_ = image_mean
lowercase_ = image_std
lowercase_ = do_convert_rgb
def A__ ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def A__ ( self , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ) -> int:
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowercase_ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
lowercase_ = []
for i in range(self.batch_size ):
lowercase_ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
            lowercase_ = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
if torchify:
            lowercase_ = [torch.from_numpy(x ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __lowerCamelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = ChineseCLIPImageProcessingTester(self , do_center_crop=__A )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
self.assertTrue(hasattr(__A , "do_center_crop" ) )
self.assertTrue(hasattr(__A , "center_crop" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_convert_rgb" ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ = self.image_processor_tester.prepare_inputs(equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ = self.image_processor_tester.prepare_inputs(equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ = self.image_processor_tester.prepare_inputs(equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class __lowerCamelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__A )
lowercase_ = 3
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
self.assertTrue(hasattr(__A , "do_center_crop" ) )
self.assertTrue(hasattr(__A , "center_crop" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_convert_rgb" ) )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ = self.image_processor_tester.prepare_inputs(equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    # Hubble parameter H(z) for a Lambda-CDM universe; densities are relative
    # to the critical density, and curvature absorbs whatever is left over.
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
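# Sanity check (follows directly from the formula, not an extra feature): at
# redshift 0 every density term is multiplied by 1 and the curvature term
# absorbs the remainder, so E(0) = 1 and the demo prints exactly H0 = 68.3.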
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCamelCase ( PreTrainedModel , BackboneMixin ):
"""simple docstring"""
lowerCAmelCase__ = "pixel_values"
lowerCAmelCase__ = False
lowerCAmelCase__ = TimmBackboneConfig
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(__a )
lowercase_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(__a , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
lowercase_ = getattr(__a , "use_pretrained_backbone" , __a )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
lowercase_ = config.out_indices if getattr(__a , "out_indices" , __a ) is not None else (-1,)
lowercase_ = timm.create_model(
config.backbone , pretrained=__a , features_only=config.features_only , in_chans=config.num_channels , out_indices=__a , **__a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowercase_ = self._backbone.return_layers
lowercase_ = {layer["module"]: str(__a ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__a )
@classmethod
def A__ ( cls , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
lowercase_ = kwargs.pop("config" , TimmBackboneConfig() )
lowercase_ = kwargs.pop("use_timm_backbone" , __a )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
lowercase_ = kwargs.pop("num_channels" , config.num_channels )
lowercase_ = kwargs.pop("features_only" , config.features_only )
lowercase_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
lowercase_ = kwargs.pop("out_indices" , config.out_indices )
lowercase_ = TimmBackboneConfig(
backbone=__a , num_channels=__a , features_only=__a , use_pretrained_backbone=__a , out_indices=__a , )
return super()._from_config(__a , **__a )
def A__ ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
pass
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowercase_ = self._all_layers
lowercase_ = self._backbone(__a , **__a )
lowercase_ = self._return_layers
lowercase_ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowercase_ = self._backbone(__a , **__a )
lowercase_ = None
lowercase_ = tuple(__a )
lowercase_ = tuple(__a ) if hidden_states is not None else None
if not return_dict:
lowercase_ = (feature_maps,)
if output_hidden_states:
lowercase_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__a , hidden_states=__a , attentions=__a )
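# A minimal usage sketch (assumes `timm` is installed; names as exposed by
# transformers; `pixel_values` is any NCHW image tensor):
#
#   from transformers import TimmBackbone, TimmBackboneConfig
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage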
import sys


def matrix_chain_order(array):
    # Classic O(n^3) dynamic program: matrix[a][b] is the minimum number of
    # scalar multiplications needed to compute the chain A_a .. A_b, and
    # sol[a][b] records the split point that achieves it.
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
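# For the demo array above, the algorithm reproduces the classic CLRS example:
# the optimal parenthesization is ((A1(A2A3))((A4A5)A6)) at a cost of 15125
# scalar multiplications, which is what main() prints.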
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # A root can only be bracketed if the function changes sign on [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
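# Both demo calls bracket the positive root of 10 - x**2, so each converges to
# sqrt(10) ~= 3.162 within the 0.01 interval-width tolerance.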
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowerCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ = CLIPTextModel(_A )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
if str(_A ).startswith("mps" ):
lowercase_ = torch.manual_seed(_A )
else:
lowercase_ = torch.Generator(device=_A ).manual_seed(_A )
lowercase_ = 2
lowercase_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , )
lowercase_ = floats_tensor(control_image.shape , rng=random.Random(_A ) ).to(_A )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(_A ) ).convert("RGB" ).resize((64, 64) )
lowercase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowercase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_A )
torch.manual_seed(0 )
lowercase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_A )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ = CLIPTextModel(_A )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase_ = MultiControlNetModel([controlneta, controlneta] )
lowercase_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
if str(_A ).startswith("mps" ):
lowercase_ = torch.manual_seed(_A )
else:
lowercase_ = torch.Generator(device=_A ).manual_seed(_A )
lowercase_ = 2
lowercase_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
]
lowercase_ = floats_tensor(control_image[0].shape , rng=random.Random(_A ) ).to(_A )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(_A ) ).convert("RGB" ).resize((64, 64) )
lowercase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**_A )
pipe.to(_A )
lowercase_ = 10.0
lowercase_ = 4
lowercase_ = self.get_dummy_inputs(_A )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**_A )[0]
lowercase_ = self.get_dummy_inputs(_A )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**_A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowercase_ = self.get_dummy_inputs(_A )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**_A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowercase_ = self.get_dummy_inputs(_A )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**_A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
lowercase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=_A , controlnet=_A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = "evil space-punk bird"
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
lowercase_ = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
lowercase_ = pipe(
_A , _A , control_image=_A , generator=_A , output_type="np" , num_inference_steps=50 , strength=0.6 , )
lowercase_ = output.images[0]
assert image.shape == (512, 512, 3)
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9e-2
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def load_vocab_file(vocab_file):
    # One token per line in ESM vocabularies
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowerCamelCase ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
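# Note on special-token handling in this tokenizer: single sequences are built
# as [CLS] + tokens (+ [EOS] when set); ESM has no [SEP] token, so sequence
# pairs reuse [EOS] as the separator, which is why pairs raise when EOS is unset.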
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    lowerCAmelCase__ = jnp.float32
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = []
lowercase_ = []
for i in range(self.num_layers ):
lowercase_ = self.in_channels if i == 0 else self.out_channels
lowercase_ = FlaxResnetBlockaD(
in_channels=UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase )
lowercase_ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase )
lowercase_ = resnets
lowercase_ = attentions
if self.add_downsample:
lowercase_ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> str:
'''simple docstring'''
lowercase_ = ()
for resnet, attn in zip(self.resnets , self.attentions ):
lowercase_ = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
lowercase_ = attn(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
lowercase_ = self.downsamplers_a(UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
    lowerCAmelCase__ = jnp.float32
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = []
for i in range(self.num_layers ):
lowercase_ = self.in_channels if i == 0 else self.out_channels
lowercase_ = FlaxResnetBlockaD(
in_channels=UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase )
lowercase_ = resnets
if self.add_downsample:
lowercase_ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = ()
for resnet in self.resnets:
lowercase_ = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
lowercase_ = self.downsamplers_a(UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    lowerCAmelCase__ = jnp.float32
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = []
lowercase_ = []
for i in range(self.num_layers ):
lowercase_ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase_ = self.prev_output_channel if i == 0 else self.out_channels
lowercase_ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase )
lowercase_ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase )
lowercase_ = resnets
lowercase_ = attentions
if self.add_upsample:
lowercase_ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> List[str]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowercase_ = res_hidden_states_tuple[-1]
lowercase_ = res_hidden_states_tuple[:-1]
lowercase_ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase_ = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
lowercase_ = attn(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
if self.add_upsample:
lowercase_ = self.upsamplers_a(UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
    lowerCAmelCase__ = jnp.float32
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = []
for i in range(self.num_layers ):
lowercase_ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase_ = self.prev_output_channel if i == 0 else self.out_channels
lowercase_ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase )
lowercase_ = resnets
if self.add_upsample:
lowercase_ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Dict:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
lowercase_ = res_hidden_states_tuple[-1]
lowercase_ = res_hidden_states_tuple[:-1]
lowercase_ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase_ = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
if self.add_upsample:
lowercase_ = self.upsamplers_a(UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    lowerCAmelCase__ = jnp.float32
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowercase_ = []
for _ in range(self.num_layers ):
lowercase_ = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase )
lowercase_ = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase )
lowercase_ = resnets
lowercase_ = attentions
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> Dict:
'''simple docstring'''
lowercase_ = self.resnets[0](UpperCAmelCase , UpperCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowercase_ = attn(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
lowercase_ = resnet(UpperCAmelCase , UpperCAmelCase , deterministic=UpperCAmelCase )
return hidden_states
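# Note on the up blocks above: `res_hidden_states_tuple` is consumed LIFO --
# each resnet pops the most recent skip tensor and concatenates it along the
# channel axis (the last axis in Flax's NHWC layout) before the residual block,
# mirroring the standard U-Net topology.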
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
lowercase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
lowercase_ = [{"col_1": 1}, {"col_2": "x"}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowercase_ = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = Dataset.from_list([] )
self.assertEqual(len(UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
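# Quick reference for the missing-column behavior exercised above: the first
# record fixes the schema, so
#
#   Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])[1]  # -> {"col_1": None}
#
# keys absent from the first record are dropped rather than added.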
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert (
            image.height == pipe.unet.config.sample_size[0]
            and image.width == pipe.unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
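Outside the test harness, the same pipeline runs in a few lines. A minimal sketch, assuming only the standard diffusers pipeline API and the checkpoint already used in the slow test above:

# Standalone usage sketch for the pipeline exercised above. The checkpoint is
# the one from the slow test; output handling follows the standard diffusers
# conventions (`output.audios`, `output.images`).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

generator = torch.Generator(device=pipe.device).manual_seed(42)
output = pipe(generator=generator)

audio = output.audios[0]  # numpy waveform
image = output.images[0]  # PIL image of the generated mel spectrogram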
| 297
| 0
|
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Runs one candidate program in a subprocess and reports the outcome."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.).
    WARNING: This function is NOT a security sandbox. Untrusted code, including
    model-generated code, should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
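To show how the pieces above compose, a minimal usage sketch; the program string and IDs are arbitrary examples, not part of the original harness:

# Usage sketch for the harness above: run one candidate program with a
# 3-second budget. `check_program` here is an arbitrary example.
if __name__ == "__main__":
    check_program = "assert sum([1, 2, 3]) == 6"
    outcome = check_correctness(check_program, timeout=3.0, task_id="demo/0", completion_id=0)
    print(outcome)  # {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}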
| 368
|
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
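A quick sanity check of `perfect` against the first four known perfect numbers:

# Sanity check: 6, 28, 496 and 8128 are the first four perfect numbers.
assert all(perfect(n) for n in (6, 28, 496, 8128))
assert not any(perfect(n) for n in (1, 12, 100))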
| 297
| 0
|
class SubArray:
    def __init__(self, arr):
        # Convert the comma-separated input string into a list of tokens.
        self.array = arr.split(",")

    def solve_sub_array(self):
        # sum_value[i]: best sum of a subarray ending at index i.
        # rear[i]: best sum seen anywhere in array[0..i].
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
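A worked example of the recurrence above (Kadane's maximum-subarray DP): for the input "1,2,-3,4,5" the best contiguous sum is 4 + 5 = 9.

# Worked example: sum_value tracks the best subarray ending at i,
# rear the best seen so far, so "1,2,-3,4,5" yields 4 + 5 = 9.
demo = SubArray("1,2,-3,4,5")
assert demo.solve_sub_array() == 9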
| 369
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True,
        initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None,
        use_labels=True, type_sequence_label_size=10, encoder_stride=8,
        out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
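A minimal usage sketch for the backbone these tests exercise. The checkpoint name reuses the one from the integration test above; loading it into `FocalNetBackbone` and the printed shapes are assumptions for illustration:

# Usage sketch: pull multi-scale feature maps out of a FocalNet backbone.
import torch
from transformers import FocalNetBackbone

backbone = FocalNetBackbone.from_pretrained("microsoft/focalnet-tiny", out_features=["stage1", "stage2"])
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = backbone(pixel_values)

for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
    # e.g. ("stage1", (1, 96, 56, 56)) for a 224x224 input; exact shapes
    # depend on the checkpoint's embed_dim and patch size.
    print(name, tuple(feature_map.shape))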
| 297
| 0
|