| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
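# Exports a BART summarization model wrapped in a scripted beam-search generator to ONNX,
# deduplicates initializers to shrink the graph, and checks that ONNX Runtime output matches
# PyTorch `generate()` within a small tolerance.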
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
__snake_case : List[str] = logging.getLogger(__name__)
__snake_case : Optional[int] = {'facebook/bart-base': BartForConditionalGeneration}
__snake_case : Tuple = {'facebook/bart-base': BartTokenizer}
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
A__ : List[Any] =argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""", type=__snake_case, default=__snake_case, help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""", type=__snake_case, default=5, help="""The maximum total input sequence length after tokenization.""", )
parser.add_argument(
"""--num_beams""", type=__snake_case, default=__snake_case, help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
), )
parser.add_argument(
"""--model_name_or_path""", type=__snake_case, help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=__snake_case, )
parser.add_argument(
"""--config_name""", type=__snake_case, default=__snake_case, help="""Pretrained config name or path if not the same as model_name""", )
parser.add_argument(
"""--device""", type=__snake_case, default="""cpu""", help="""Device where the model will be run""", )
parser.add_argument("""--output_file_path""", type=__snake_case, default=__snake_case, help="""Where to store the final ONNX file.""" )
A__ : Union[str, Any] =parser.parse_args()
return args
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Any="cpu" ) -> List[str]:
"""simple docstring"""
A__ : Dict =model_dict[model_name].from_pretrained(__snake_case ).to(__snake_case )
A__ : Any =tokenizer_dict[model_name].from_pretrained(__snake_case )
if model_name in ["facebook/bart-base"]:
A__ : List[Any] =0
A__ : Any =None
A__ : Dict =0
return huggingface_model, tokenizer
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Tuple, __snake_case : int, __snake_case : int, __snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
model.eval()
A__ : str =None
A__ : List[Any] =torch.jit.script(BARTBeamSearchGenerator(__snake_case ) )
with torch.no_grad():
A__ : Tuple ="""My friends are cool but they eat too many carbs."""
A__ : Tuple =tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="""pt""" ).to(model.device )
A__ : Optional[Any] =model.generate(
inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], num_beams=__snake_case, max_length=__snake_case, early_stopping=__snake_case, decoder_start_token_id=model.config.decoder_start_token_id, )
torch.onnx.export(
__snake_case, (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
), __snake_case, opset_version=14, input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""], output_names=["""output_ids"""], dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
}, example_outputs=__snake_case, )
logger.info("""Model exported to {}""".format(__snake_case ) )
A__ : Union[str, Any] =remove_dup_initializers(os.path.abspath(__snake_case ) )
logger.info("""Deduplicated and optimized model written to {}""".format(__snake_case ) )
A__ : str =onnxruntime.InferenceSession(__snake_case )
A__ : List[Any] =ort_sess.run(
__snake_case, {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(__snake_case ),
"""max_length""": np.array(__snake_case ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
}, )
np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[str] =parse_args()
A__ : Optional[int] =5
A__ : Optional[int] =4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
A__ : List[Any] =torch.device(args.device )
A__ , A__ : List[Any] =load_model_tokenizer(args.model_name_or_path, __snake_case )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(__snake_case )
if args.max_length:
A__ : Union[str, Any] =args.max_length
if args.num_beams:
A__ : Tuple =args.num_beams
if args.output_file_path:
A__ : str =args.output_file_path
else:
A__ : Optional[Any] ="""BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
if __name__ == "__main__":
main()
| 134 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
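# Dense polynomial over the reals: coefficients[i] holds the coefficient of x**i
# (lowest degree first), so a polynomial of degree n stores n + 1 coefficients.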
class lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : MutableSequence[float] ) -> None:
'''simple docstring'''
if len(lowerCAmelCase_ ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
A__ : list[float] =list(lowerCAmelCase_ )
A__ : Optional[int] =degree
def __add__( self : Union[str, Any] , lowerCAmelCase_ : Polynomial ) -> Polynomial:
'''simple docstring'''
if self.degree > polynomial_a.degree:
A__ : int =self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , lowerCAmelCase_ )
else:
A__ : Any =polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , lowerCAmelCase_ )
def __sub__( self : str , lowerCAmelCase_ : Polynomial ) -> Polynomial:
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : List[Any] ) -> Polynomial:
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : str , lowerCAmelCase_ : Polynomial ) -> Polynomial:
'''simple docstring'''
A__ : list[float] =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : int | float ) -> int | float:
'''simple docstring'''
A__ : int | float =0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[str] ) -> str:
'''simple docstring'''
A__ : Optional[int] =""""""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCAmelCase_ )
return polynomial
def __repr__( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.__str__()
def lowercase__ ( self : str ) -> Polynomial:
'''simple docstring'''
A__ : list[float] =[0] * self.degree
for i in range(self.degree ):
A__ : Union[str, Any] =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : int | float = 0 ) -> Polynomial:
'''simple docstring'''
A__ : list[float] =[0] * (self.degree + 2)
A__ : Any =constant
for i in range(self.degree + 1 ):
A__ : str =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , lowerCAmelCase_ )
def __eq__( self : Optional[int] , lowerCAmelCase_ : object ) -> bool:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[Any] , lowerCAmelCase_ : object ) -> bool:
'''simple docstring'''
return not self.__eq__(lowerCAmelCase_ )
| 134 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
A__ : Optional[int] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
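# Holds Decision Transformer hyperparameters: environment dimensions (state_dim, act_dim)
# plus the GPT-2-style transformer settings (n_positions, n_layer, n_head, dropout rates).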
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''decision_transformer'''
A__ = ['''past_key_values''']
A__ = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : str , __a : Union[str, Any]=17 , __a : Dict=4 , __a : str=128 , __a : Tuple=4096 , __a : str=True , __a : List[str]=1 , __a : Optional[Any]=1024 , __a : Any=3 , __a : List[str]=1 , __a : str=None , __a : Union[str, Any]="relu" , __a : Optional[Any]=0.1 , __a : str=0.1 , __a : List[str]=0.1 , __a : Any=1e-5 , __a : Dict=0.0_2 , __a : str=True , __a : str=True , __a : List[str]=50256 , __a : Any=50256 , __a : str=False , __a : List[str]=False , **__a : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = state_dim
__snake_case : Dict = act_dim
__snake_case : Optional[int] = hidden_size
__snake_case : int = max_ep_len
__snake_case : Tuple = action_tanh
__snake_case : str = vocab_size
__snake_case : Tuple = n_positions
__snake_case : Optional[Any] = n_layer
__snake_case : int = n_head
__snake_case : List[str] = n_inner
__snake_case : List[Any] = activation_function
__snake_case : Optional[Any] = resid_pdrop
__snake_case : List[str] = embd_pdrop
__snake_case : List[Any] = attn_pdrop
__snake_case : Any = layer_norm_epsilon
__snake_case : Union[str, Any] = initializer_range
__snake_case : List[str] = scale_attn_weights
__snake_case : str = use_cache
__snake_case : List[Any] = scale_attn_by_inverse_layer_idx
__snake_case : Optional[int] = reorder_and_upcast_attn
__snake_case : Dict = bos_token_id
__snake_case : Tuple = eos_token_id
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
| 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
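# Lazy import structure for GroupViT: the PyTorch and TensorFlow model classes are only
# registered when the corresponding framework is installed, and only imported on first access.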
A__ : int = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 0 | 0 |
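# FizzBuzz: builds one space-separated string of results for the values from the starting
# number up to and including `iterations`.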
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or not number >= 1:
raise ValueError(
'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
_lowercase : Tuple = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowerCamelCase_ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
import numpy
# List of input, output pairs
UpperCAmelCase : str = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCAmelCase : Optional[int] = (((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCAmelCase : str = [2, 4, 1, 5]
UpperCAmelCase : List[str] = len(train_data)
UpperCAmelCase : Dict = 0.0_0_9
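# Batch gradient descent for the linear hypothesis h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3:
# each step moves every parameter against the mean error gradient until consecutive
# parameter vectors agree within the tolerance set in run_gradient_descent().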
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple="train" ):
"""simple docstring"""
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - output(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _A ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
a__ : Tuple =0
for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=m ):
"""simple docstring"""
a__ : Any =0
for i in range(SCREAMING_SNAKE_CASE ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE )
else:
summation_value += _error(SCREAMING_SNAKE_CASE ) * train_data[i][0][index]
return summation_value
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Any =summation_of_cost_derivative(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / m
return cost_derivative_value
def _A ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
a__ : Dict =0.0_0_0_0_0_2
a__ : Union[str, Any] =0
a__ : Any =0
while True:
j += 1
a__ : Any =[0, 0, 0, 0]
for i in range(0 , len(SCREAMING_SNAKE_CASE ) ):
a__ : Tuple =get_cost_derivative(i - 1 )
a__ : List[Any] =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE , rtol=SCREAMING_SNAKE_CASE , ):
break
a__ : Optional[Any] =temp_parameter_vector
print(("Number of iterations:", j) )
def _A ( ):
"""simple docstring"""
for i in range(len(SCREAMING_SNAKE_CASE ) ):
print(("Actual output value:", output(SCREAMING_SNAKE_CASE , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(SCREAMING_SNAKE_CASE , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 95 | 0 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
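# Trainer subclass that evaluates/predicts in two phases: run the prediction loop with metric
# computation disabled, post-process the raw predictions, then compute metrics on the
# post-processed output (a pattern used by e.g. question-answering examples).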
class _SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self , *__A , __A=None , __A=None , **__A ) -> Dict:
super().__init__(*_a , **_a )
lowerCAmelCase_ :int = eval_examples
lowerCAmelCase_ :Optional[Any] = post_process_function
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = "eval" ) -> Any:
lowerCAmelCase_ :Dict = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase_ :Union[str, Any] = self.get_eval_dataloader(_a )
lowerCAmelCase_ :Tuple = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase_ :int = self.compute_metrics
lowerCAmelCase_ :List[str] = None
lowerCAmelCase_ :Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCAmelCase_ :List[Any] = time.time()
try:
lowerCAmelCase_ :Optional[Any] = eval_loop(
_a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
lowerCAmelCase_ :List[Any] = compute_metrics
lowerCAmelCase_ :Optional[Any] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCAmelCase_ :List[Any] = self.post_process_function(_a , _a , output.predictions )
lowerCAmelCase_ :str = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCAmelCase_ :Optional[int] = metrics.pop(_a )
metrics.update(output.metrics )
else:
lowerCAmelCase_ :Dict = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase_ :Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a )
return metrics
def __lowerCAmelCase ( self , __A , __A , __A=None , __A = "test" ) -> List[Any]:
lowerCAmelCase_ :Optional[int] = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase_ :int = self.compute_metrics
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCAmelCase_ :List[Any] = time.time()
try:
lowerCAmelCase_ :str = eval_loop(
_a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
lowerCAmelCase_ :Optional[int] = compute_metrics
lowerCAmelCase_ :Any = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase_ :Dict = self.post_process_function(_a , _a , output.predictions , """predict""" )
lowerCAmelCase_ :Optional[Any] = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCAmelCase_ :Tuple = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
| 354 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
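# tqdm wrapper for distributed runs: with main_process_only=True the progress bar is
# disabled on every process except the local main process.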
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index != 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
| 1 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase_ )
env_command_parser(subparsers=lowerCAmelCase_ )
launch_command_parser(subparsers=lowerCAmelCase_ )
tpu_command_parser(subparsers=lowerCAmelCase_ )
test_command_parser(subparsers=lowerCAmelCase_ )
# Let's go
__SCREAMING_SNAKE_CASE = parser.parse_args()
if not hasattr(lowerCAmelCase_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 54 |
from ..utils import DummyObject, requires_backends
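# Dummy placeholder classes: each one raises a helpful error via requires_backends when
# instantiated or loaded without Flax installed.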
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Any = ["flax"]
def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Dict = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[str] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : int = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : List[Any] = ["flax"]
def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : str = ["flax"]
def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Union[str, Any] = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Tuple = ["flax"]
def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[Any] = ["flax"]
def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int:
requires_backends(cls ,["""flax"""] )
class _snake_case ( metaclass=__snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["flax"]
def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int:
requires_backends(self ,["""flax"""] )
@classmethod
def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]:
requires_backends(cls ,["""flax"""] )
@classmethod
def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]:
requires_backends(cls ,["""flax"""] )
| 345 | 0 |
from __future__ import annotations
import pandas as pd
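# Shortest Remaining Time First (preemptive SJF) scheduling: at every time unit, run the
# arrived process with the least remaining burst time, then derive waiting and turnaround times.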
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = [0] * no_of_processes
SCREAMING_SNAKE_CASE_ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE_ = burst_time[i]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 9_99_99_99_99
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
SCREAMING_SNAKE_CASE_ = remaining_time[j]
SCREAMING_SNAKE_CASE_ = j
SCREAMING_SNAKE_CASE_ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
SCREAMING_SNAKE_CASE_ = remaining_time[short]
if minm == 0:
SCREAMING_SNAKE_CASE_ = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
SCREAMING_SNAKE_CASE_ = False
# Find finish time of current process
SCREAMING_SNAKE_CASE_ = increment_time + 1
# Calculate waiting time
SCREAMING_SNAKE_CASE_ = finish_time - arrival_time[short]
SCREAMING_SNAKE_CASE_ = finar - burst_time[short]
if waiting_time[short] < 0:
SCREAMING_SNAKE_CASE_ = 0
# Increment time
increment_time += 1
return waiting_time
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = [0] * no_of_processes
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE_ = burst_time[i] + waiting_time[i]
return turn_around_time
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE_ = total_waiting_time + waiting_time[i]
SCREAMING_SNAKE_CASE_ = total_turn_around_time + turn_around_time[i]
print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print('''Average turn around time =''', total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
__UpperCAmelCase = int(input())
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
__UpperCAmelCase = map(int, input().split())
__UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCAmelCase = burst_time
__UpperCAmelCase = no_of_processes
__UpperCAmelCase = waiting_time
__UpperCAmelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__UpperCAmelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
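# Converts a timm BiT (Big Transfer, ResNetv2) checkpoint into a Hugging Face
# BitForImageClassification model, rebuilds the matching image processor from the timm
# transform config, and verifies that pixel values and logits agree with the original model.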
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE_ = BitConfig(
conv_layer=__lowerCamelCase, num_labels=10_00, idalabel=__lowerCamelCase, labelaid=__lowerCamelCase, )
return config
def A__ ( __lowerCamelCase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''stem.conv''', '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head.fc''', '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE_ = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE_ = '''bit.encoder.''' + name
return name
def A__ ( ):
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
SCREAMING_SNAKE_CASE_ = get_config(__lowerCamelCase )
# load original model from timm
SCREAMING_SNAKE_CASE_ = create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = BitForImageClassification(__lowerCamelCase )
model.eval()
model.load_state_dict(__lowerCamelCase )
# create image processor
SCREAMING_SNAKE_CASE_ = create_transform(**resolve_data_config({}, model=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = transform.transforms
SCREAMING_SNAKE_CASE_ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__lowerCamelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__lowerCamelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=__lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = transform(__lowerCamelCase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = processor(__lowerCamelCase, return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__lowerCamelCase, __lowerCamelCase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = outputs.logits
print('''Logits:''', logits[0, :3] )
print('''Predicted class:''', model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE_ = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(F'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(F'''ybelkada/{model_name}''' )
processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 257 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_snake_case : int = logging.get_logger(__name__)
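# Normalizes raw video input into a nested batch: a single image becomes [[image]], a flat
# list of frames becomes [frames], and a list of videos is passed through unchanged.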
def a_ ( lowerCAmelCase_ : List[str] ):
if isinstance(lowerCAmelCase_, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase_, (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase_ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""pixel_values"""]
def __init__( self : int , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_5_6}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = resample
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = offset
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" in size:
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size['shortest_edge'] , default_to_square=lowerCAmelCase_ )
elif "height" in size and "width" in size:
__lowerCAmelCase = (size['height'], size['width'])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ) -> Dict:
__lowerCAmelCase = image.astype(np.floataa )
if offset:
__lowerCAmelCase = image - (scale / 2)
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Any , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = to_numpy_array(lowerCAmelCase_ )
if do_resize:
__lowerCAmelCase = self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ )
if do_center_crop:
__lowerCAmelCase = self.center_crop(lowerCAmelCase_ , size=lowerCAmelCase_ )
if do_rescale:
__lowerCAmelCase = self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ , offset=lowerCAmelCase_ )
if do_normalize:
__lowerCAmelCase = self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ )
__lowerCAmelCase = to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ )
return image
def lowercase ( self : Optional[int] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : Any , ) -> PIL.Image.Image:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = offset if offset is not None else self.offset
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__lowerCAmelCase = make_batched(lowerCAmelCase_ )
__lowerCAmelCase = [
[
self._preprocess_image(
image=lowerCAmelCase_ , do_resize=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , do_center_crop=lowerCAmelCase_ , crop_size=lowerCAmelCase_ , do_rescale=lowerCAmelCase_ , rescale_factor=lowerCAmelCase_ , offset=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , )
for img in video
]
for video in videos
]
__lowerCAmelCase = {'pixel_values': videos}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 284 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case : Dict = pytest.mark.integration
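# Integration tests for the FAISS and Elasticsearch index helpers; Elasticsearch calls are
# mocked with unittest.mock.patch, so no live cluster is required.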
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCAmelCase_ ) for x in np.arange(3_0 ).tolist()]} )
return dset
def lowercase ( self : List[str] ) -> Tuple:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
__lowerCAmelCase = dset.map(
lambda lowerCAmelCase_ , lowerCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ )
__lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase ( self : Optional[Any] ) -> str:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : int ) -> Optional[Any]:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase ( self : Union[str, Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0 )
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : str ) -> int:
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
__lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search_batch , queries[0] )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> List[str]:
import faiss
__lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase_ ):
__lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase ( self : Union[str, Any] ) -> Dict:
import faiss
__lowerCAmelCase = faiss.IndexFlat(5 )
__lowerCAmelCase = FaissIndex(custom_index=lowerCAmelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase ( self : str ) -> Any:
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
__lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__lowerCAmelCase = 'index.faiss'
__lowerCAmelCase = F"""mock://{index_name}"""
index.save(lowerCAmelCase_, storage_options=mockfs.storage_options )
__lowerCAmelCase = FaissIndex.load(lowerCAmelCase_, storage_options=mockfs.storage_options )
__lowerCAmelCase = np.zeros(5, dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = Elasticsearch()
__lowerCAmelCase = {'acknowledged': True}
__lowerCAmelCase = ElasticSearchIndex(es_client=lowerCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__lowerCAmelCase = 'foo'
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__lowerCAmelCase = 'foo'
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__lowerCAmelCase = ['foo', 'bar', 'foobar']
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
# batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
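# Against a live server the same wrapper needs no mocks; a sketch assuming a local
# Elasticsearch instance on the default port (the URL is illustrative):
#     from elasticsearch import Elasticsearch
#     from datasets.search import ElasticSearchIndex
#     index = ElasticSearchIndex(es_client=Elasticsearch("http://localhost:9200"))
#     index.add_documents(["foo", "bar", "foobar"])
#     scores, ids = index.search("foo")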
| 284 | 1 |
"""simple docstring"""
def is_arithmetic_series( series ):
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean( series ):
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
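# A quick illustration of the two helpers above; the sample series is arbitrary:
#     is_arithmetic_series([2, 4, 6])   -> True   (constant difference of 2)
#     is_arithmetic_series([2, 4, 7])   -> False
#     arithmetic_mean([2, 4, 6])        -> 4.0    ((2 + 4 + 6) / 3)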
| 353 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }
    def __init__( self , activation_dropout=0.1 , activation_function="gelu" , vocab_size=30522 , hidden_size=1024 , encoder_ffn_dim=4096 , num_encoder_layers=12 , num_encoder_attention_heads=16 , decoder_ffn_dim=4096 , num_decoder_layers=12 , num_decoder_attention_heads=16 , attention_dropout=0.1 , dropout=0.1 , max_position_embeddings=512 , init_std=0.02 , is_encoder_decoder=True , add_cross_attention=True , decoder_start_token_id=0 , ngram=2 , num_buckets=32 , relative_max_distance=128 , disable_ngram_loss=False , eps=0.0 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
    @property
    def num_hidden_layers( self ):
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""")
| 255 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num : float ) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error" )
    return quad(integrand , 0 , inf , args=(num,) )[0]


def integrand( x : float , z : float ) -> float:
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
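# Sanity checks for the integral above: for positive integers n, gamma(n) == (n - 1)!,
# and gamma(0.5) == sqrt(pi).
def _gamma_demo():
    assert abs(gamma(5 ) - math.factorial(4 ) ) < 1e-6  # gamma(5) = 4! = 24
    return gamma(0.5 )  # ≈ 1.7724538509 = sqrt(pi)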
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )

    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str , return_tensors="np" )
        encoded_tok = tokenizer(input_str , return_tensors="np" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_text_list( self ):
        '''simple docstring'''
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text )
        seq_length = 16
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_nested_text_list( self ):
        '''simple docstring'''
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts )
        seq_length = 16
        batch_size = len(input_texts )
        num_max_text_queries = max([len(texts ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_case( self ):
        '''simple docstring'''
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts )
        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input )
        self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
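# A compact end-to-end sketch of the processor the tests above exercise; the
# checkpoint name is the one the tests use, and running this downloads weights.
def _owlvit_processor_demo():
    from transformers import OwlViTProcessor
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32" )
    image = Image.new("RGB" , (64, 64) )  # placeholder image
    inputs = processor(text=[["cat", "nasa badge"]] , images=image , return_tensors="np" )
    # one row per text query, each padded to the model's 16-token query length
    return inputs["input_ids"].shape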
| 340 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ), 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ), 1 )
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    '''simple docstring'''
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextModelTester(self )
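# A minimal inference sketch with the same checkpoint the integration test uses;
# running it downloads weights, so it is illustrative rather than part of the suite.
def _convnext_inference_demo():
    model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' )
    inputs = image_processor(images=prepare_img() , return_tensors='pt' )
    with torch.no_grad():
        logits = model(**inputs ).logits
    # map the top logit back to a human-readable ImageNet label
    return model.config.id2label[logits.argmax(-1 ).item()]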
| 246 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
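# The _LazyModule indirection above makes `import transformers.models.mobilebert`
# cheap: torch/TF are only imported when a listed attribute is first accessed.
# Typical downstream usage (standard transformers API, shown for illustration):
#     from transformers import MobileBertConfig, MobileBertModel
#     model = MobileBertModel(MobileBertConfig())  # randomly initialised weights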
| 246 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fp16 : bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
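# Typical invocation of this exporter, followed by a sketch of loading the result
# with onnxruntime. All paths are placeholders; whether the scripted `return_dict`
# input survives in the final graph depends on the torch.onnx version, so the feed
# dict below assumes it was folded away during export.
#     python convert_vae_decoder.py --model_path ./stable-diffusion-v1-5 \
#         --output_path ./sd_onnx --opset 14
#
#     import numpy as np, onnxruntime
#     sess = onnxruntime.InferenceSession("./sd_onnx/vae_decoder/model.onnx")
#     latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
#     (sample,) = sess.run(["sample"], {"latent_sample": latents})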
| 143 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand() -> tuple[str, str, str]:
    play, oppo = randrange(len(SORTED_HANDS )), randrange(len(SORTED_HANDS ))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands = 100 ):
    return (generate_random_hand() for _ in range(number_of_hands ))


@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    assert PokerHand(hand )._is_flush() == expected


@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    assert PokerHand(hand )._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    assert PokerHand(hand )._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    assert PokerHand(hand )._hand_type == expected


@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected


@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
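# A direct illustration of the comparison API exercised above, using hands taken
# from SORTED_HANDS:
def _poker_demo():
    full_house = PokerHand("2H 2C 3S 3H 3D" )
    two_pairs = PokerHand("3H 4C 4H 3S 2H" )
    return full_house.compare_with(two_pairs )  # "Win": a full house beats two pairs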
| 143 | 1 |
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = T5Config.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
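# Example invocation (the file names are placeholders for a real TF checkpoint,
# its matching T5 config, and the desired output directory):
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./t5_tf_checkpoint \
#         --config_file ./t5_config.json \
#         --pytorch_dump_path ./t5_pytorch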
| 133 |
"""simple docstring"""
def exchange_sort( numbers : list[int] ):
    '''simple docstring'''
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
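# exchange sort performs O(n^2) comparisons, like selection sort, swapping in place:
#     exchange_sort([5, 2, 4, 1])  ->  [1, 2, 4, 5]
#     exchange_sort([-1, 0, -2])   ->  [-2, -1, 0]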
| 133 | 1 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
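# A short usage sketch: the auto classes above resolve a checkpoint's config to the
# matching Flax architecture at load time. Running this for real downloads weights.
#     from transformers import FlaxAutoModelForSeq2SeqLM
#     model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     # resolves to FlaxT5ForConditionalGeneration via FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING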
| 46 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    '''simple docstring'''
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(f'Load fine-pruned model from {model_name_or_path}' )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'bertarized_{os.path.basename(model_name_or_path )}' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'\nCreated folder {target_model_path}' )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
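# Example invocation (script and model paths are placeholders; for the topK method
# `--threshold` means the fraction of weights kept):
#     python bertarize.py \
#         --pruning_method topK \
#         --threshold 0.10 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model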
| 260 | 0 |
"""simple docstring"""
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
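# torch.cuda.device_count() honours CUDA_VISIBLE_DEVICES, so the script can be
# sanity-checked on a subset of devices (file name below is illustrative):
#     CUDA_VISIBLE_DEVICES=0 python test_multigpu.py   # -> "Successfully ran on 1 GPUs"
#     CUDA_VISIBLE_DEVICES=  python test_multigpu.py   # -> "Successfully ran on 0 GPUs"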
| 209 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self : Any , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image] , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[int] = 512 , snake_case__ : Optional[int] = 512 , snake_case__ : float = 0.6 , snake_case__ : Optional[int] = 50 , snake_case__ : Optional[float] = 7.5 , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[float] = 100 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : float = 0.8 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , ):
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(snake_case__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(snake_case__ , torch.Generator ) and batch_size > 1:
lowerCamelCase_ : List[str] =[generator] + [None] * (batch_size - 1)
lowerCamelCase_ : Any =[
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowerCamelCase_ : Optional[Any] =[x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ : Any =", ".join(snake_case__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(snake_case__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowerCamelCase_ : Union[str, Any] =self.get_image_description(snake_case__ )
if style_prompt is None:
if len(snake_case__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowerCamelCase_ : str =self.get_image_description(snake_case__ )
# get prompt text embeddings for content and style
lowerCamelCase_ : str =self.tokenizer(
snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , )
lowerCamelCase_ : List[str] =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ : int =self.tokenizer(
snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , )
lowerCamelCase_ : Optional[int] =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ : Dict =slerp(snake_case__ , snake_case__ , snake_case__ )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ : str =text_embeddings.repeat_interleave(snake_case__ , dim=0 )
# set timesteps
lowerCamelCase_ : List[Any] ="offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ : List[Any] ={}
if accepts_offset:
lowerCamelCase_ : Optional[int] =1
self.scheduler.set_timesteps(snake_case__ , **snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =self.get_timesteps(snake_case__ , snake_case__ , self.device )
lowerCamelCase_ : Union[str, Any] =timesteps[:1].repeat(snake_case__ )
# Preprocess image
lowerCamelCase_ : str =preprocess(snake_case__ , snake_case__ , snake_case__ )
lowerCamelCase_ : int =self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , text_embeddings.dtype , self.device , snake_case__ )
lowerCamelCase_ : Dict =preprocess(snake_case__ , snake_case__ , snake_case__ )
lowerCamelCase_ : Tuple =self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , text_embeddings.dtype , self.device , snake_case__ )
lowerCamelCase_ : int =slerp(snake_case__ , snake_case__ , snake_case__ )
if clip_guidance_scale > 0:
lowerCamelCase_ : List[Any] =self.get_clip_image_embeddings(snake_case__ , snake_case__ )
lowerCamelCase_ : Optional[Any] =self.get_clip_image_embeddings(snake_case__ , snake_case__ )
lowerCamelCase_ : List[Any] =slerp(
snake_case__ , snake_case__ , snake_case__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ : str =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ : List[Any] =content_text_input.input_ids.shape[-1]
lowerCamelCase_ : Union[str, Any] =self.tokenizer([""] , padding="max_length" , max_length=snake_case__ , return_tensors="pt" )
lowerCamelCase_ : List[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ : List[str] =uncond_embeddings.repeat_interleave(snake_case__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ : List[str] =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ : Optional[Any] =(batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ : List[str] =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ : Union[str, Any] =torch.randn(snake_case__ , generator=snake_case__ , device="cpu" , dtype=snake_case__ ).to(
self.device )
else:
lowerCamelCase_ : Tuple =torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCamelCase_ : Any =latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Optional[int] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ : List[Any] ="eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Dict ={}
if accepts_eta:
lowerCamelCase_ : Tuple =eta
# check if the scheduler accepts generator
lowerCamelCase_ : Tuple ="generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ : Optional[int] =generator
with self.progress_bar(total=snake_case__ ):
for i, t in enumerate(snake_case__ ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : List[str] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : Optional[Any] =self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCamelCase_ : Tuple =self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ : Dict =noise_pred.chunk(2 )
lowerCamelCase_ : Optional[Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ : Optional[int] =(
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.cond_fn(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : Dict =self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ : str =1 / 0.18_215 * latents
lowerCamelCase_ : List[str] =self.vae.decode(snake_case__ ).sample
lowerCamelCase_ : str =(image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : List[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ : List[str] =self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
| 209 | 1 |
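A standalone numeric check of the slerp helper defined in the pipeline above, on toy 2-D vectors (numpy only, torch branch omitted):

import numpy as np

def slerp_np(t, v0, v1, DOT_THRESHOLD=0.9995):
    # same math as the pipeline's slerp, restricted to numpy inputs
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        return (1 - t) * v0 + t * v1  # nearly parallel: fall back to linear interpolation
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

print(slerp_np(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # [0.7071... 0.7071...]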
class Node:
    def __init__(self, data, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 |
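A short usage sketch for the doubly linked list above (the values are arbitrary):

linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)
linked_list.insert_at_position(2, 9)                             # list is now 1 9 2 3
print(linked_list)                                               # "1 9 2 3"
print(2 in linked_list)                                          # True, via __contains__
linked_list.delete_value(9)
print(linked_list.get_head_data(), linked_list.get_tail_data())  # 1 3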
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """simple docstring"""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
image_count = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
| 146 | 0 |
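The double unicode-escape decode in the loop above un-mangles URLs that arrive doubly escaped in the scraped page source; a tiny illustration on a made-up string:

raw = "https://example.com/\\\\u0069mg.jpg"  # hypothetical doubly escaped URL
once = bytes(raw, "ascii").decode("unicode-escape")    # first pass collapses the doubled backslash
twice = bytes(once, "ascii").decode("unicode-escape")  # second pass resolves the escape to "i"
print(twice)  # https://example.com/img.jpg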
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 259 |
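The test above threads an explicit JAX PRNG key through its dummy inputs; this is the standard pattern, shown standalone (the shapes are arbitrary):

import jax

key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)          # derive a fresh key instead of reusing one
sample = jax.random.uniform(subkey, (4, 3, 32, 32))
print(sample.shape, sample.dtype)            # (4, 3, 32, 32) float32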
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = 2
while True:
if is_prime(_UpperCamelCase ):
yield num
num += 1
def _lowerCamelCase ( _UpperCamelCase = 200_0000 ):
'''simple docstring'''
return sum(takewhile(lambda _UpperCamelCase : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 259 | 1 |
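A quick sanity check of the solution above: takewhile stops at the first prime that reaches n, so summing the primes below 10 gives 2 + 3 + 5 + 7 = 17.

from itertools import takewhile

# assumes the prime_generator defined in the snippet above
print(sum(takewhile(lambda x: x < 10, prime_generator())))  # 17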
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 138 |
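The config classes above serialize nested dataclasses by layering a to_dict override on dataclasses.asdict; the same pattern in miniature, with invented class names:

from dataclasses import asdict, dataclass

@dataclass
class InnerConfig:  # illustrative stand-in for StructureModuleConfig
    width: int = 32

    def to_dict(self):
        return asdict(self)

@dataclass
class OuterConfig:  # illustrative stand-in for TrunkConfig
    depth: int = 2
    inner: "InnerConfig" = None

    def __post_init__(self):
        if self.inner is None:
            self.inner = InnerConfig()          # fill in defaults, as the configs above do
        elif isinstance(self.inner, dict):
            self.inner = InnerConfig(**self.inner)

    def to_dict(self):
        output = asdict(self)
        output["inner"] = self.inner.to_dict()
        return output

print(OuterConfig(inner={"width": 64}).to_dict())  # {'depth': 2, 'inner': {'width': 64}}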
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 95 | 0 |
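The prepare_image_inputs helper used by the tests above is defined elsewhere; a rough stand-in that produces the kind of random PIL images those tests consume (the sizes are arbitrary):

import numpy as np
from PIL import Image

def make_random_pil_images(batch_size=7, num_channels=3, height=30, width=30):
    # uint8 arrays in height-width-channel order convert cleanly to PIL
    return [
        Image.fromarray(np.random.randint(0, 256, (height, width, num_channels), dtype=np.uint8))
        for _ in range(batch_size)
    ]

images = make_random_pil_images()
print(len(images), images[0].size)  # 7 (30, 30)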
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
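The _LazyModule machinery above defers heavy imports until first attribute access; a minimal standalone version of the same idea using module-level __getattr__ (PEP 562), with placeholder module names:

# lazy_pkg/__init__.py (illustrative)
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")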
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """simple docstring"""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        """simple docstring"""
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)
        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)
        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
| 84 | 0 |
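The test above drives the training script in-process by patching sys.argv; the core of that trick in isolation (the main here is a stand-in for run_glue_deebert.main, not the real script):

import sys
from unittest.mock import patch

def main():  # stand-in for the real script entry point
    return {"script": sys.argv[0], "flags": sys.argv[1:]}

args = ["--num_train_epochs", "3"]
args.insert(0, "fake_script.py")
with patch.object(sys, "argv", args):
    result = main()
print(result)  # {'script': 'fake_script.py', 'flags': ['--num_train_epochs', '3']}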
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A ( lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = DeiTConfig()
# all deit models have fine-tuned heads
UpperCamelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase = 1_000
UpperCamelCase = 'huggingface/label-files'
UpperCamelCase = 'imagenet-1k-id2label.json'
UpperCamelCase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='dataset' ) , 'r' ) )
UpperCamelCase = {int(lowercase ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = int(deit_name[-6:-4] )
UpperCamelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
UpperCamelCase = 192
UpperCamelCase = 768
UpperCamelCase = 12
UpperCamelCase = 3
elif deit_name[9:].startswith('small' ):
UpperCamelCase = 384
UpperCamelCase = 1_536
UpperCamelCase = 12
UpperCamelCase = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
UpperCamelCase = 1_024
UpperCamelCase = 4_096
UpperCamelCase = 24
UpperCamelCase = 16
# load original model from timm
UpperCamelCase = timm.create_model(lowercase , pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase = timm_model.state_dict()
UpperCamelCase = create_rename_keys(lowercase , lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , lowercase )
# load HuggingFace model
UpperCamelCase = DeiTForImageClassificationWithTeacher(lowercase ).eval()
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCamelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCamelCase = DeiTImageProcessor(size=lowercase , crop_size=config.image_size )
UpperCamelCase = image_processor(images=prepare_img() , return_tensors='pt' )
UpperCamelCase = encoding['pixel_values']
UpperCamelCase = model(lowercase )
UpperCamelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase , outputs.logits , atol=1e-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCAmelCase : str = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 222 |
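read_in_q_k_v above splits timm's fused qkv projection into separate query, key, and value tensors; the same slicing on a toy weight matrix (the hidden size is arbitrary):

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv, rows stacked q, k, v

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)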
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ) -> str:
        """simple docstring"""
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
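# Hedged sketch (not part of the original file): what the three helpers above
# produce for a pair of sequences, assuming cls_token_id=0 and sep_token_id=2.
# The obfuscated methods correspond to build_inputs_with_special_tokens,
# get_special_tokens_mask and create_token_type_ids_from_sequences in the
# Hugging Face tokenizer API.
def _special_token_layout_sketch(ids_a, ids_b, cls_id=0, sep_id=2):
    inputs = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]          # [CLS] a [SEP] b [SEP]
    mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]     # 1 marks special tokens
    type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)       # segment ids
    return inputs, mask, type_ids
# _special_token_layout_sketch([5, 6], [7]) ->
#   ([0, 5, 6, 2, 7, 2], [1, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 1])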
| 222 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {}
class lowerCamelCase (PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase_ = "llama"
UpperCAmelCase_ = ["past_key_values"]
def __init__( self : List[Any], _UpperCAmelCase : Dict=3_2_0_0_0, _UpperCAmelCase : List[str]=4_0_9_6, _UpperCAmelCase : Any=1_1_0_0_8, _UpperCAmelCase : List[Any]=3_2, _UpperCAmelCase : Dict=3_2, _UpperCAmelCase : List[str]=None, _UpperCAmelCase : Dict="silu", _UpperCAmelCase : Optional[int]=2_0_4_8, _UpperCAmelCase : Union[str, Any]=0.02, _UpperCAmelCase : Tuple=1E-6, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : List[str]=0, _UpperCAmelCase : Optional[int]=1, _UpperCAmelCase : Tuple=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[Any]=False, _UpperCAmelCase : List[Any]=None, **_UpperCAmelCase : Dict, ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = num_key_value_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = rms_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = pretraining_tp
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, tie_word_embeddings=_UpperCAmelCase, **_UpperCAmelCase, )
def A_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F'''got {self.rope_scaling}''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.rope_scaling.get("type", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self.rope_scaling.get("factor", _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
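# Hedged usage sketch (not part of the original file): the validator above only
# accepts {"type": "linear" | "dynamic", "factor": float > 1.0}. The class
# mirrors the public transformers LlamaConfig, so an equivalent call would be:
# from transformers import LlamaConfig
# LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# LlamaConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # raises ValueError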
| 191 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict, _UpperCAmelCase : Any, _UpperCAmelCase : List[Any]=sys.maxsize ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "bilinear"
SCREAMING_SNAKE_CASE__ : Optional[int] = max_size
SCREAMING_SNAKE_CASE__ : Optional[int] = short_edge_length
def __call__( self : Optional[int], _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
for img in imgs:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = img.shape[:2]
# later: provide list and randomly choose index for resize
SCREAMING_SNAKE_CASE__ : List[str] = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1 )
if size == 0:
return img
SCREAMING_SNAKE_CASE__ : int = size * 1.0 / min(_UpperCAmelCase, _UpperCAmelCase )
if h < w:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
if max(_UpperCAmelCase, _UpperCAmelCase ) > self.max_size:
SCREAMING_SNAKE_CASE__ : str = self.max_size * 1.0 / max(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = newh * scale
SCREAMING_SNAKE_CASE__ : List[str] = neww * scale
SCREAMING_SNAKE_CASE__ : Any = int(neww + 0.5 )
SCREAMING_SNAKE_CASE__ : List[Any] = int(newh + 0.5 )
            if img.dtype == np.uint8:
SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pil_image.resize((neww, newh), PILImageResampling.BILINEAR )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_UpperCAmelCase )
else:
                SCREAMING_SNAKE_CASE__ : str = img.permute(2, 0, 1 ).unsqueeze(0 )  # HWC -> NCHW
SCREAMING_SNAKE_CASE__ : Tuple = nn.functional.interpolate(
_UpperCAmelCase, (newh, neww), mode=self.interp_method, align_corners=_UpperCAmelCase ).squeeze(0 )
img_augs.append(_UpperCAmelCase )
return img_augs
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict, _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST )
SCREAMING_SNAKE_CASE__ : Any = cfg.INPUT.FORMAT
SCREAMING_SNAKE_CASE__ : List[str] = cfg.SIZE_DIVISIBILITY
SCREAMING_SNAKE_CASE__ : List[Any] = cfg.PAD_VALUE
SCREAMING_SNAKE_CASE__ : Dict = cfg.INPUT.MAX_SIZE_TEST
SCREAMING_SNAKE_CASE__ : Optional[int] = cfg.MODEL.DEVICE
SCREAMING_SNAKE_CASE__ : int = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ), 1, 1 )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ), 1, 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = lambda _UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std
def A_ ( self : str, _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = tuple(max(_UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [im.shape[-2:] for im in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
nn.functional.pad(
_UpperCAmelCase, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
for size, im in zip(_UpperCAmelCase, _UpperCAmelCase )
]
return torch.stack(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase )
def __call__( self : Any, _UpperCAmelCase : Dict, _UpperCAmelCase : List[str]=False ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : str = [images]
if single_image:
assert len(_UpperCAmelCase ) == 1
for i in range(len(_UpperCAmelCase ) ):
if isinstance(images[i], torch.Tensor ):
images.insert(_UpperCAmelCase, images.pop(_UpperCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i], torch.Tensor ):
images.insert(
_UpperCAmelCase, torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase ), input_format=self.input_format ) )
.to(self.device )
.float(), )
# resize smallest edge
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([im.shape[:2] for im in images] )
SCREAMING_SNAKE_CASE__ : Tuple = self.aug(_UpperCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
SCREAMING_SNAKE_CASE__ : List[Any] = [self.normalizer(_UpperCAmelCase ) for x in images]
# now pad them to do the following operations
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.pad(_UpperCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.true_divide(_UpperCAmelCase, _UpperCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _a ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple[int, int] ) -> List[Any]:
'''simple docstring'''
assert torch.isfinite(SCREAMING_SNAKE_CASE__ ).all(), "Box tensor contains infinite or NaN!"
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = box_size
tensor[:, 0].clamp_(min=0 , max=SCREAMING_SNAKE_CASE__ )
tensor[:, 1].clamp_(min=0 , max=SCREAMING_SNAKE_CASE__ )
tensor[:, 2].clamp_(min=0 , max=SCREAMING_SNAKE_CASE__ )
tensor[:, 3].clamp_(min=0 , max=SCREAMING_SNAKE_CASE__ )
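# Hedged usage sketch (not part of the original file): the two helpers above are
# typically chained after resizing -- boxes predicted on the resized image are
# rescaled with the per-image (scale_y, scale_x) pair returned by the
# preprocessor, then clamped to the original image bounds.
_example_boxes = torch.tensor([[10.0, 20.0, 650.0, 480.0]])  # x1, y1, x2, y2
_example_scales = torch.tensor([[1.5, 0.5]])                 # (scale_y, scale_x)
_example_boxes[:, 0::2] *= _example_scales[:, 1]             # rescale x coordinates
_example_boxes[:, 1::2] *= _example_scales[:, 0]             # rescale y coordinates
_example_boxes[:, 0::2].clamp_(min=0, max=600)               # clip x to image width
_example_boxes[:, 1::2].clamp_(min=0, max=400)               # clip y to image height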
| 191 | 1 |
'''simple docstring'''
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = get_test_to_tester_mapping(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :Union[str, Any] = get_test_to_tester_mapping(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :Tuple = {'''BertModelTest''': '''BertModelTester'''}
UpperCamelCase__ :Tuple = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = get_model_to_test_mapping(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :Tuple = get_model_to_test_mapping(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :int = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
UpperCamelCase__ :Optional[int] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = get_model_to_tester_mapping(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :int = get_model_to_tester_mapping(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :str = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
UpperCamelCase__ :Tuple = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        self.assertEqual(get_test_info.to_json(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
 | 97 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__A : List[Any] = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
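# Hedged note (not part of the original file): with the _LazyModule pattern
# above, importing the package stays cheap and the torch-heavy modeling module
# is only loaded on first attribute access, e.g.:
# import transformers.models.gpt_neox_japanese as pkg  # fast, no torch import yet
# model_cls = pkg.GPTNeoXJapaneseModel                 # triggers the real import
# The eager imports under TYPE_CHECKING keep static type checkers accurate.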
| 154 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
def __init__( self , __a , __a=13 , __a=3 , __a=True , __a=True , __a=0.1 , __a=0.1 , __a=224 , __a=1000 , __a=[3, 3, 6, 4] , __a=[48, 56, 112, 220] , ):
'''simple docstring'''
__a : Optional[int] = parent
__a : Dict = batch_size
__a : Dict = num_channels
__a : str = is_training
__a : Optional[int] = use_labels
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[Any] = num_labels
__a : Tuple = image_size
__a : Optional[Any] = layer_depths
__a : Dict = embed_dims
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : int = None
if self.use_labels:
__a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__a , layer_scale_init_value=1E-5 , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : int = SwiftFormerModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = self.num_labels
__a : Optional[int] = SwiftFormerForImageClassification(__a )
model.to(__a )
model.eval()
__a : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__a : int = SwiftFormerForImageClassification(__a )
model.to(__a )
model.eval()
__a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (config , pixel_values , labels) = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
A_ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = SwiftFormerModelTester(self )
__a : Dict = ConfigTester(
self , config_class=__a , has_text_modality=__a , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[Any] = model_class(__a )
__a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(__a )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[int] = [*signature.parameters.keys()]
__a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : int = SwiftFormerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__a : Any = outputs.hidden_states
__a : Tuple = 8
self.assertEqual(len(__a ) , __a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def _config_zero_init(__a ):
__a : List[Any] = copy.deepcopy(__a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__a , __a , 1E-1_0 )
if isinstance(getattr(__a , __a , __a ) , __a ):
__a : List[Any] = _config_zero_init(getattr(__a , __a ) )
setattr(__a , __a , __a )
return configs_no_init
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[Any] = _config_zero_init(__a )
for model_class in self.all_model_classes:
__a : Tuple = model_class(config=__a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ():
__a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(__a )
__a : Optional[Any] = self.default_image_processor
__a : Tuple = prepare_img()
__a : int = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__a )
# verify the logits
__a : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : int = torch.tensor([[-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0]] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
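# Hedged inference sketch (not part of the original file), condensing what the
# slow integration test above exercises; the checkpoint name is taken from the
# test itself.
# from transformers import SwiftFormerForImageClassification, ViTImageProcessor
# processor = ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs')
# model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs')
# inputs = processor(images=prepare_img(), return_tensors='pt')
# with torch.no_grad():
#     logits = model(**inputs).logits  # shape (1, 1000)
# predicted_class = logits.argmax(-1).item()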
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Tuple = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : int = prime_factors(__a )
    if is_square_free(snake_case_ ):
        return -1 if len(snake_case_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
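# Hedged worked examples (not part of the original file): the function above is
# the Moebius function mu(n) -- 0 if n has a squared prime factor, otherwise
# (-1)^k for k prime factors.
# SCREAMING_SNAKE_CASE__(4)  -> 0   (4 = 2**2 is not square-free)
# SCREAMING_SNAKE_CASE__(6)  -> 1   (6 = 2*3, two factors)
# SCREAMING_SNAKE_CASE__(30) -> -1  (30 = 2*3*5, three factors)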
| 327 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def UpperCAmelCase_ ( self : int , _A : Tuple , _A : Tuple , _A : str=None , _A : Dict=False , _A : Tuple=False , _A : str=False , ) -> Tuple:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
snake_case_ : List[Any] = np.array([re.sub(_A , '' , _A ) for x in predictions] )
snake_case_ : Optional[Any] = np.array([re.sub(_A , '' , _A ) for x in references] )
else:
snake_case_ : Dict = np.asarray(_A )
snake_case_ : Tuple = np.asarray(_A )
if ignore_case:
snake_case_ : List[str] = np.char.lower(_A )
snake_case_ : Any = np.char.lower(_A )
if ignore_punctuation:
snake_case_ : int = string.punctuation.maketrans('' , '' , string.punctuation )
snake_case_ : Tuple = np.char.translate(_A , table=_A )
snake_case_ : str = np.char.translate(_A , table=_A )
if ignore_numbers:
snake_case_ : Optional[int] = string.digits.maketrans('' , '' , string.digits )
snake_case_ : str = np.char.translate(_A , table=_A )
snake_case_ : Union[str, Any] = np.char.translate(_A , table=_A )
snake_case_ : int = predictions == references
return {"exact_match": np.mean(_A ) * 100}
| 327 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __a (ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = AutoencoderKL
_SCREAMING_SNAKE_CASE :int = """sample"""
_SCREAMING_SNAKE_CASE :Optional[Any] = 1E-2
@property
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (32, 32)
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_a )
return {"sample": image}
@property
def _a ( self ) -> Tuple:
"""simple docstring"""
return (3, 32, 32)
@property
def _a ( self ) -> int:
"""simple docstring"""
return (3, 32, 32)
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = self.model_class(**_a )
model.to(_a )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE__ : Dict = model(**_a ).sample
        # run the backwards pass on the model. For simplicity, we don't compute a
        # real loss and instead backprop on (out - labels).mean()
model.zero_grad()
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn_like(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_class(**_a )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_a )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE__ : Tuple = model_a(**_a ).sample
        # run the backwards pass on the model. For simplicity, we don't compute a
        # real loss and instead backprop on (out_a - labels).mean()
model_a.zero_grad()
SCREAMING_SNAKE_CASE__ : Tuple = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE__ : List[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_a )
SCREAMING_SNAKE_CASE__ : Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
SCREAMING_SNAKE_CASE__ : List[str] = model.to(_a )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = image.to(_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , sample_posterior=_a , generator=_a ).sample
SCREAMING_SNAKE_CASE__ : Any = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[
-4.0_0_7_8E-0_1,
-3.8_3_2_3E-0_4,
-1.2_6_8_1E-0_1,
-1.1_4_6_2E-0_1,
2.0_0_9_5E-0_1,
1.0_8_9_3E-0_1,
-8.8_2_4_7E-0_2,
-3.0_3_6_1E-0_1,
-9.8_6_4_4E-0_3,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(_a , _a , rtol=1E-2 ) )
@slow
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self , _a , _a ) -> List[Any]:
"""simple docstring"""
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_a ) for s in shape] )}.npy'''
def _a ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _a=0 , _a=(4, 3, 512, 512) , _a=False ) -> List[Any]:
"""simple docstring"""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.float16 if fpaa else torch.float32
SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(load_hf_numpy(self.get_file_format(_a , _a ) ) ).to(_a ).to(_a )
return image
def _a ( self , _a="CompVis/stable-diffusion-v1-4" , _a=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = """fp16""" if fpaa else None
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.float16 if fpaa else torch.float32
SCREAMING_SNAKE_CASE__ : List[str] = AutoencoderKL.from_pretrained(
_a , subfolder="""vae""" , torch_dtype=_a , revision=_a , )
model.to(_a ).eval()
return model
def _a ( self , _a=0 ) -> List[str]:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(_a )
return torch.Generator(device=_a ).manual_seed(_a )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def _a ( self , _a , _a , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_sd_image(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_generator(_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , generator=_a , sample_posterior=_a ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE__ : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(_a , _a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , _a , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_sd_vae_model(fpaa=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_sd_image(_a , fpaa=_a )
SCREAMING_SNAKE_CASE__ : Any = self.get_generator(_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(_a , generator=_a , sample_posterior=_a ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(_a )
assert torch_all_close(_a , _a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def _a ( self , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : Any = self.get_sd_image(_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_a ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE__ : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(_a , _a , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , _a , _a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : int = self.get_sd_image(_a , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.decode(_a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE__ : Optional[int] = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(_a )
assert torch_all_close(_a , _a , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , _a , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_sd_vae_model(fpaa=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_sd_image(_a , shape=(3, 4, 64, 64) , fpaa=_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.decode(_a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE__ : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(_a )
assert torch_all_close(_a , _a , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def _a ( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_sd_vae_model(fpaa=_a )
SCREAMING_SNAKE_CASE__ : Dict = self.get_sd_image(_a , shape=(3, 4, 64, 64) , fpaa=_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model.decode(_a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model.decode(_a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_a , _a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : Dict = self.get_sd_image(_a , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model.decode(_a ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.decode(_a ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_a , _a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def _a ( self , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE__ : int = self.get_sd_image(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_generator(_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.encode(_a ).latent_dist
SCREAMING_SNAKE_CASE__ : List[Any] = dist.sample(generator=_a )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE__ : int = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(_a )
SCREAMING_SNAKE_CASE__ : str = 3E-3 if torch_device != """mps""" else 1E-2
assert torch_all_close(_a , _a , atol=_a )
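# Hedged round-trip sketch (not part of the original file) of the encode/decode
# path the tests above probe, reusing the dummy checkpoint from the pretrained
# output test:
# vae = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy').eval()
# x = torch.randn(1, vae.config.in_channels, vae.config.sample_size, vae.config.sample_size)
# with torch.no_grad():
#     posterior = vae.encode(x).latent_dist
#     z = posterior.sample(generator=torch.manual_seed(0))
#     x_rec = vae.decode(z).sample  # same spatial shape as x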
| 56 |
"""simple docstring"""
def longest_distance( graph ) -> None:
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
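# Hedged note (not part of the original file): longest_distance is Kahn's
# topological sort specialized to a longest-path count -- every vertex starts
# at distance 1 and each relaxed edge adds 1, so the printed value is the
# number of vertices on the longest path. For the graph above one longest
# chain is 0 -> 3 -> 5 -> 6 -> 7, so the call prints 5.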
| 56 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase_ (A : Tuple , A : List[Any] , A : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
snake_case__ : Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case__ : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
snake_case__ : Optional[Any] = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
        snake_case__ : Optional[int] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.float32 ) / 255.0
snake_case__ : Tuple = image.transpose(0 , 3 , 1 , 2 )
snake_case__ : Dict = 2.0 * image - 1.0
snake_case__ : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
snake_case__ : Any = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def lowercase_ (A : List[Any] , A : Optional[int] , A : Any , A : Optional[Any]=0.9995 ):
if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
snake_case__ : Optional[Any] = True
snake_case__ : Any = va.device
snake_case__ : Tuple = va.cpu().numpy()
snake_case__ : Dict = va.cpu().numpy()
snake_case__ : Any = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
snake_case__ : Any = (1 - t) * va + t * va
else:
snake_case__ : int = np.arccos(SCREAMING_SNAKE_CASE__ )
snake_case__ : List[str] = np.sin(SCREAMING_SNAKE_CASE__ )
snake_case__ : str = theta_a * t
snake_case__ : List[Any] = np.sin(SCREAMING_SNAKE_CASE__ )
snake_case__ : int = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case__ : int = sin_theta_t / sin_theta_a
snake_case__ : Dict = sa * va + sa * va
if inputs_are_torch:
snake_case__ : Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return va
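# Hedged note (not part of the original file): the helper above is spherical
# linear interpolation (slerp). For inputs at angle theta it returns
# sin((1-t)*theta)/sin(theta) * v0 + sin(t*theta)/sin(theta) * v1, falling back
# to plain linear interpolation when |cos(theta)| exceeds DOT_THRESHOLD.
# For orthogonal unit vectors and t = 0.5 this gives (v0 + v1) / sqrt(2),
# preserving unit norm where naive lerp would shrink it.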
def lowercase_ (A : Union[str, Any] , A : Union[str, Any] ):
snake_case__ : Union[str, Any] = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
snake_case__ : List[Any] = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowercase_ (A : str , A : Tuple ):
for param in model.parameters():
snake_case__ : List[str] = value
class snake_case__ ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : List[Any], _snake_case : Union[str, Any], _snake_case : Any, _snake_case : List[str], _snake_case : int, _snake_case : Any, _snake_case : Optional[int], _snake_case : int, _snake_case : str=None, _snake_case : Optional[Any]=None, _snake_case : Tuple=None, ) ->Optional[Any]:
super().__init__()
self.register_modules(
vae=_snake_case, text_encoder=_snake_case, clip_model=_snake_case, tokenizer=_snake_case, unet=_snake_case, scheduler=_snake_case, feature_extractor=_snake_case, coca_model=_snake_case, coca_tokenizer=_snake_case, coca_transform=_snake_case, )
snake_case__ : Optional[int] = (
feature_extractor.size
if isinstance(feature_extractor.size, _snake_case )
else feature_extractor.size["""shortest_edge"""]
)
snake_case__ : Dict = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, _snake_case )
set_requires_grad(self.clip_model, _snake_case )
def lowercase_ ( self : List[str], _snake_case : Optional[int] = "auto" ) ->Union[str, Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def lowercase_ ( self : Optional[Any] ) ->Any:
self.enable_attention_slicing(_snake_case )
def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]:
set_requires_grad(self.vae, _snake_case )
def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]:
set_requires_grad(self.vae, _snake_case )
def lowercase_ ( self : List[str] ) ->List[str]:
set_requires_grad(self.unet, _snake_case )
def lowercase_ ( self : str ) ->Any:
set_requires_grad(self.unet, _snake_case )
def lowercase_ ( self : Optional[Any], _snake_case : Optional[Any], _snake_case : Optional[int], _snake_case : Optional[int] ) ->str:
snake_case__ : Optional[int] = min(int(num_inference_steps * strength ), _snake_case )
snake_case__ : List[Any] = max(num_inference_steps - init_timestep, 0 )
snake_case__ : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase_ ( self : Union[str, Any], _snake_case : List[Any], _snake_case : List[str], _snake_case : List[Any], _snake_case : Any, _snake_case : Optional[Any], _snake_case : Union[str, Any]=None ) ->Dict:
if not isinstance(_snake_case, torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(_snake_case )}''' )
snake_case__ : List[str] = image.to(device=_snake_case, dtype=_snake_case )
if isinstance(_snake_case, _snake_case ):
snake_case__ : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_snake_case )
]
snake_case__ : Tuple = torch.cat(_snake_case, dim=0 )
else:
snake_case__ : List[str] = self.vae.encode(_snake_case ).latent_dist.sample(_snake_case )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : Any = 0.1_8_2_1_5 * init_latents
snake_case__ : Dict = init_latents.repeat_interleave(_snake_case, dim=0 )
snake_case__ : List[str] = randn_tensor(init_latents.shape, generator=_snake_case, device=_snake_case, dtype=_snake_case )
# get latents
snake_case__ : Optional[int] = self.scheduler.add_noise(_snake_case, _snake_case, _snake_case )
snake_case__ : Optional[Any] = init_latents
return latents
def lowercase_ ( self : Optional[Any], _snake_case : List[str] ) ->Any:
snake_case__ : Dict = self.coca_transform(_snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case__ : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
snake_case__ : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>', '' ).rstrip(' .,' )
def lowercase_ ( self : Optional[Any], _snake_case : List[str], _snake_case : Optional[int] ) ->int:
snake_case__ : Optional[int] = self.feature_extractor.preprocess(_snake_case )
snake_case__ : Optional[int] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case__ : List[Any] = self.clip_model.get_image_features(_snake_case )
snake_case__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=_snake_case )
snake_case__ : Tuple = image_embeddings_clip.repeat_interleave(_snake_case, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase_ ( self : List[Any], _snake_case : str, _snake_case : Tuple, _snake_case : Optional[Any], _snake_case : int, _snake_case : Any, _snake_case : List[Any], _snake_case : Dict, ) ->int:
snake_case__ : Optional[int] = latents.detach().requires_grad_()
snake_case__ : Optional[int] = self.scheduler.scale_model_input(_snake_case, _snake_case )
# predict the noise residual
snake_case__ : Optional[int] = self.unet(_snake_case, _snake_case, encoder_hidden_states=_snake_case ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case__ : Optional[int] = self.scheduler.alphas_cumprod[timestep]
snake_case__ : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case__ : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case__ : int = torch.sqrt(_snake_case )
snake_case__ : Any = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, _snake_case ):
snake_case__ : Dict = self.scheduler.sigmas[index]
snake_case__ : Tuple = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case__ : Optional[Any] = 1 / 0.1_8_2_1_5 * sample
snake_case__ : Union[str, Any] = self.vae.decode(_snake_case ).sample
snake_case__ : str = (image / 2 + 0.5).clamp(0, 1 )
snake_case__ : int = transforms.Resize(self.feature_extractor_size )(_snake_case )
snake_case__ : Tuple = self.normalize(_snake_case ).to(latents.dtype )
snake_case__ : Tuple = self.clip_model.get_image_features(_snake_case )
snake_case__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=_snake_case )
snake_case__ : str = spherical_dist_loss(_snake_case, _snake_case ).mean() * clip_guidance_scale
snake_case__ : List[Any] = -torch.autograd.grad(_snake_case, _snake_case )[0]
if isinstance(self.scheduler, _snake_case ):
snake_case__ : Any = latents.detach() + grads * (sigma**2)
snake_case__ : Optional[Any] = noise_pred_original
else:
snake_case__ : int = noise_pred_original - torch.sqrt(_snake_case ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Any, _snake_case : List[Any], _snake_case : Dict, _snake_case : List[str] = None, _snake_case : Dict = None, _snake_case : Tuple = 5_1_2, _snake_case : Any = 5_1_2, _snake_case : Optional[Any] = 0.6, _snake_case : str = 5_0, _snake_case : Optional[Any] = 7.5, _snake_case : Any = 1, _snake_case : Union[str, Any] = 0.0, _snake_case : Any = 1_0_0, _snake_case : Optional[Any] = None, _snake_case : List[Any] = "pil", _snake_case : Union[str, Any] = True, _snake_case : Union[str, Any] = 0.8, _snake_case : Dict = 0.1, _snake_case : Tuple = 0.1, ) ->Optional[Any]:
if isinstance(_snake_case, _snake_case ) and len(_snake_case ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(_snake_case )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_snake_case, torch.Generator ) and batch_size > 1:
snake_case__ : str = [generator] + [None] * (batch_size - 1)
snake_case__ : Union[str, Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
snake_case__ : Optional[int] = [x[0] for x in coca_is_none if x[1]]
snake_case__ : int = """, """.join(_snake_case )
# generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            content_prompt = self.get_image_description(content_image )
        if style_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            style_prompt = self.get_image_description(style_image )
# get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        style_text_input = self.tokenizer(
            style_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings )
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0 )
# set timesteps
snake_case__ : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case__ : Tuple = {}
if accepts_offset:
snake_case__ : int = 1
self.scheduler.set_timesteps(_snake_case, **_snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case__ : Optional[int] = self.get_timesteps(_snake_case, _snake_case, self.device )
snake_case__ : Tuple = timesteps[:1].repeat(_snake_case )
# Preprocess image
snake_case__ : str = preprocess(_snake_case, _snake_case, _snake_case )
snake_case__ : int = self.prepare_latents(
_snake_case, _snake_case, _snake_case, text_embeddings.dtype, self.device, _snake_case )
snake_case__ : List[Any] = preprocess(_snake_case, _snake_case, _snake_case )
snake_case__ : Tuple = self.prepare_latents(
_snake_case, _snake_case, _snake_case, text_embeddings.dtype, self.device, _snake_case )
snake_case__ : List[str] = slerp(_snake_case, _snake_case, _snake_case )
if clip_guidance_scale > 0:
snake_case__ : Union[str, Any] = self.get_clip_image_embeddings(_snake_case, _snake_case )
snake_case__ : Optional[int] = self.get_clip_image_embeddings(_snake_case, _snake_case )
snake_case__ : Optional[int] = slerp(
_snake_case, _snake_case, _snake_case )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case__ : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case__ : List[str] = content_text_input.input_ids.shape[-1]
snake_case__ : Optional[Any] = self.tokenizer([''], padding='max_length', max_length=_snake_case, return_tensors='pt' )
snake_case__ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case__ : Any = uncond_embeddings.repeat_interleave(_snake_case, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case__ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case__ : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case__ : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case__ : str = torch.randn(_snake_case, generator=_snake_case, device='cpu', dtype=_snake_case ).to(
self.device )
else:
snake_case__ : Optional[int] = torch.randn(_snake_case, generator=_snake_case, device=self.device, dtype=_snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
snake_case__ : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ : str = {}
if accepts_eta:
snake_case__ : str = eta
# check if the scheduler accepts generator
snake_case__ : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case__ : Tuple = generator
with self.progress_bar(total=_snake_case ):
for i, t in enumerate(_snake_case ):
# expand the latents if we are doing classifier free guidance
snake_case__ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ : Optional[int] = self.scheduler.scale_model_input(_snake_case, _snake_case )
# predict the noise residual
snake_case__ : Dict = self.unet(_snake_case, _snake_case, encoder_hidden_states=_snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case__ : str = noise_pred.chunk(2 )
snake_case__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case__ : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case__ : Union[str, Any] = self.cond_fn(
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, )
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Any = self.scheduler.step(_snake_case, _snake_case, _snake_case, **_snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not define self.vae.config.scaling_factor
        snake_case__ : Optional[Any] = 1 / 0.18215 * latents
snake_case__ : Any = self.vae.decode(_snake_case ).sample
snake_case__ : Optional[Any] = (image / 2 + 0.5).clamp(0, 1 )
snake_case__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
snake_case__ : List[str] = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_snake_case, nsfw_content_detected=_snake_case )
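# `cond_fn` above calls `spherical_dist_loss` without showing its definition. A minimal
# sketch, assuming the standard spherical distance loss from CLIP-guided diffusion (the
# name `spherical_dist_loss_sketch` is ours; the pipeline defines its own helper elsewhere):
import torch.nn.functional as F
def spherical_dist_loss_sketch(x, y):
    # project both embedding batches onto the unit sphere
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # squared great-circle distance between the normalized embeddings
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)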
| 277 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowercase : str = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
lowercase : Dict = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
lowercase : int = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a class label (or a float score for the 'stsb' subset).
    references: list of references, one per prediction.
        Each reference should be a class label (or a float score for the 'stsb' subset).
Returns: depending on the GLUE subset, one or several of:
    \"accuracy\": Accuracy
    \"f1\": F1 score
    \"pearson\": Pearson Correlation
    \"spearmanr\": Spearman Correlation
    \"matthews_correlation\": Matthews Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels )
    f1 = float(f1_score(y_true=labels, y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels )[0] )
    spearman_corr = float(spearmanr(preds, labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" ,)
    def _compute( self ,predictions ,references ):
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references ,predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions ,references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions ,references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions ,references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
                """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
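# A minimal sketch of the lazy-import idea behind `_LazyModule` above (not the actual
# transformers implementation): attribute access triggers the real import on demand.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        # import the submodule only when one of its attributes is first requested
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)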
| 366 |
def equated_monthly_installments( principal , rate_per_annum , years_to_repay ) -> float:
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('Years to repay must be an integer > 0' )
    # Yearly rate is divided by 12 to get the monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get the number of payments, as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
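    # Example with hypothetical figures: 100000 borrowed at 10% p.a. over 2 years works
    # out to roughly 4614.49 per month.
    print(equated_monthly_installments(100_000, 0.10, 2))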
| 225 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ = val[
:dim, :
]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[
-dim:, :
]
else:
snake_case_ = val[
:dim
]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[
-dim:
]
else:
if "weight" in key:
snake_case_ = val[
:dim, :
]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[
-dim:, :
]
else:
snake_case_ = val[:dim]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ = key_split[2]
snake_case_ = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[
dim : dim * 2
]
snake_case_ = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()
    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)
    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)
    print("Shape of pixel values:", inputs.pixel_values.shape)
    with torch.no_grad():
        outputs = model(**inputs)
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"""Model name {model_name} not supported""")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("""Looks ok!""")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing model, processor and slow tokenizer files to the hub...""")
        model.push_to_hub(model_name, organization="""nielsr""")
        processor.push_to_hub(model_name, organization="""nielsr""")
        slow_tokenizer.push_to_hub(model_name, organization="""nielsr""")
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
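# Hypothetical invocation (the script name and paths are placeholders), mirroring the
# argparse setup above:
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub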
| 347 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
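# Hypothetical example: with sys.argv[1:] == ["utils", "src"] and git reporting
# ["src/foo.py", "docs/bar.md", "utils/baz.py"], this prints "src/foo.py utils/baz.py".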
| 347 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)
        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    '''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
    def framework_version( self ):
'''simple docstring'''
return tf.__version__
    def _inference_speed( self, model_name: str, batch_size: int, sequence_length: int ) -> float:
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self, model_name: str, batch_size: int, sequence_length: int ) -> float:
        '''simple docstring'''
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ):
        '''simple docstring'''
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self, model_name: str, batch_size: int, sequence_length: int ) -> Callable[[], None]:
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config, """architectures""" )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_forward():
            return model(input_ids, training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self, model_name: str, batch_size: int, sequence_length: int ) -> Callable[[], None]:
        '''simple docstring'''
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config, """architectures""" )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""", fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self, func ) -> float:
        '''simple docstring'''
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func, repeat=1, number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )
    def _measure_memory( self, func ):
        '''simple docstring'''
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    trace = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        memory = "N/A"
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes, int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )
                return "N/A", None
| 351 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """simple docstring"""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    """simple docstring"""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
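    # A hypothetical cross-check against the standard library: hashlib's hexdigest()
    # is a str, while md5_me returns the hex digest as bytes.
    import hashlib
    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")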
| 132 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__( self , vocab_size=30522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ):
        """simple docstring"""
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
    @property
    def num_blocks( self ):
        """simple docstring"""
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks( self , value ):
        """simple docstring"""
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
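# A hypothetical sanity check of the derived properties above (kept as a comment so the
# configuration module stays import-only):
#   config = FunnelConfig(block_sizes=[4, 4, 4])
#   assert config.num_hidden_layers == 12 and config.num_blocks == 3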
| 39 |
class RadixNode:
    """simple docstring"""
    def __init__( self , prefix="" , is_leaf=False ):
        """simple docstring"""
        # Mapping from the first character of an edge label to the child node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word ):
        """simple docstring"""
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words ):
        """simple docstring"""
        for word in words:
            self.insert(word )
    def insert( self , word ):
        """simple docstring"""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word ):
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word ):
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
        return True
    def print_tree( self , height = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie() -> bool:
    """simple docstring"""
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def pytests() -> None:
    """simple docstring"""
    assert test_trie()
def main() -> None:
    """simple docstring"""
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words )
    print('Words:' , words )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
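# Hypothetical illustration of the Case 4 split in insert(): starting from a node whose
# prefix is "test", inserting "team" re-labels it to the common prefix "te" with two
# children, "st" and "am"; find("test") and find("team") then both succeed.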
| 39 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = '''lm_head'''
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name, value, feature_extractor, unused_weights, use_group_norm ) ->Union[str, Any]:
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ) ->Tuple:
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 42
            vocab_dict['''<s>'''] = 43
            with open(vocab_path, '''w''', encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict, vocab_handle )
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 173 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert( model, tf_checkpoint_path, config ) ->Union[str, Any]:
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('''/''' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"""Skipping non-model layer {full_name}""" )
            continue
        if "optimizer" in full_name:
            logger.info(f"""Skipping optimization layer {full_name}""" )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('''layer_with_weights''' ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path, full_name )
        names.append('''/'''.join(name ) )
        arrays.append(array )
    logger.info(f"""Read a total of {len(arrays ):,} layers""" )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f"""Found layer names with different depths (layer depth {list(set(layer_depth ) )})""" )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
            ''' heads.''' )
    # convert layers
    logger.info('''Converting weights...''' )
    for full_name, array in zip(names, arrays ):
        name = full_name.split('''/''' )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('''layer_with_weights''' ):
                layer_num = int(m_name.split('''-''' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['''embeddings''', '''LayerNorm'''] )
                    pointer = getattr(pointer, '''embeddings''' )
                    pointer = getattr(pointer, '''LayerNorm''' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
                    pointer = getattr(pointer, '''encoder''' )
                    pointer = getattr(pointer, '''layer''' )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['''pooler''', '''dense'''] )
                    pointer = getattr(pointer, '''pooler''' )
                    pointer = getattr(pointer, '''dense''' )
            elif m_name == "embeddings":
                trace.append('''embeddings''' )
                pointer = getattr(pointer, '''embeddings''' )
                if layer_num == 0:
                    trace.append('''word_embeddings''' )
                    pointer = getattr(pointer, '''word_embeddings''' )
                elif layer_num == 1:
                    trace.append('''position_embeddings''' )
                    pointer = getattr(pointer, '''position_embeddings''' )
                elif layer_num == 2:
                    trace.append('''token_type_embeddings''' )
                    pointer = getattr(pointer, '''token_type_embeddings''' )
                else:
                    raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
                trace.append('''weight''' )
                pointer = getattr(pointer, '''weight''' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['''attention''', '''self'''] )
                pointer = getattr(pointer, '''attention''' )
                pointer = getattr(pointer, '''self''' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
                pointer = getattr(pointer, '''attention''' )
                pointer = getattr(pointer, '''output''' )
                pointer = getattr(pointer, '''LayerNorm''' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['''attention''', '''output''', '''dense'''] )
                pointer = getattr(pointer, '''attention''' )
                pointer = getattr(pointer, '''output''' )
                pointer = getattr(pointer, '''dense''' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['''output''', '''dense'''] )
                pointer = getattr(pointer, '''output''' )
                pointer = getattr(pointer, '''dense''' )
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(['''output''', '''LayerNorm'''] )
                pointer = getattr(pointer, '''output''' )
                pointer = getattr(pointer, '''LayerNorm''' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('''key''' )
                pointer = getattr(pointer, '''key''' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('''query''' )
                pointer = getattr(pointer, '''query''' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('''value''' )
                pointer = getattr(pointer, '''value''' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['''intermediate''', '''dense'''] )
                pointer = getattr(pointer, '''intermediate''' )
                pointer = getattr(pointer, '''dense''' )
            elif m_name == "_output_layer_norm":
                # NOTE: unreachable - the identical condition is already handled above
                trace.append('''output''' )
                pointer = getattr(pointer, '''output''' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('''bias''' )
                pointer = getattr(pointer, '''bias''' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('''weight''' )
                pointer = getattr(pointer, '''weight''' )
            else:
                logger.warning(f"""Ignored {m_name}""" )
        # for certain layers reshape is necessary
        trace = '''.'''.join(trace )
        if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', trace ) or re.match(
            R'''(\S+)\.attention\.output\.dense\.weight''', trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                f""" {array.shape}""" )
        logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
    return model
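# Background note: TF stores dense kernels as (in_features, out_features) while
# torch.nn.Linear.weight is laid out as (out_features, in_features); that is why
# every variable whose name contains "kernel" is transposed above before the copy.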
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path, config_path, pytorch_dump_path ) ->Tuple:
    """simple docstring"""
    logger.info(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config )
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        type=str,
        required=True,
        help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''',
        type=str,
        required=True,
        help='''Path to the output PyTorch model (must include filename).''',
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 173 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences, padding_value, padding_side, sequence_length ) -> List[Any]:
    '''simple docstring'''
    if isinstance(padding_value, tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2), padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length), padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value, tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
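# Worked example: right-padding two label sequences to length 4 with -1 gives
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]
# while a tuple padding_value such as (-1, -1) pads (start, end) span pairs instead.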
def is_punctuation( char ) -> Dict:
    '''simple docstring'''
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith('''P''' ):
        return True
    return False
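# For example, is_punctuation("!") is True via the ASCII range check,
# is_punctuation("\u201c") is True via the Unicode category check (category "Pi"),
# and is_punctuation("a") is False.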
@dataclass
class DataCollatorForLukeTokenClassification( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
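# Hedged usage sketch: the checkpoint name is illustrative, and `features` is a
# stand-in for dicts shaped like the entity-aware tokenizer output this collator expects.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)  # pads labels, ner_tags and original_entity_spans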
| 56 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 267 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class _PatchedModuleObj :
    """Set all the modules components as attributes of the _PatchedModuleObj object."""
    def __init__( self , module , attrs=None ):
        """simple docstring"""
        self._attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in self._attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    """simple docstring"""
    _active_patches = []
    def __init__( self , obj , target: str , new , attrs=None ):
        """simple docstring"""
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        """simple docstring"""
        *submodules , target_attr = self.target.split("." )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__( self , *exc_info ):
        """simple docstring"""
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        """simple docstring"""
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        """simple docstring"""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 17 |
'''simple docstring'''
import base64
def baseaa_encode( string: str ) -> bytes:
    return base64.b85encode(string.encode("utf-8" ) )
def baseaa_decode( encoded: bytes ) -> str:
    return base64.b85decode(encoded ).decode("utf-8" )
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 17 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests( unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        outputs = generator('''Something there''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
        outputs = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
        outputs = generator(
            ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
        with self.assertRaises(TypeError ):
            generator(4 )
    @require_torch
    def test_small_model_pt( self ):
        generator = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
        # do_sample=False necessary for reproducibility
        outputs = generator('''Something there''' , do_sample=False )
        self.assertEqual(outputs , [{'''generated_text''': ''''''}] )
        num_beams = 3
        outputs = generator(
            '''Something there''' , num_return_sequences=num_beams , num_beams=num_beams , )
        target_outputs = [
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': ''''''},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator('''This is a test''' , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {'''generated_token_ids''': ANY(torch.Tensor )},
                {'''generated_token_ids''': ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '''<pad>'''
        outputs = generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                ],
                [
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf( self ):
        generator = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
        # do_sample=False necessary for reproducibility
        outputs = generator('''Something there''' , do_sample=False )
        self.assertEqual(outputs , [{'''generated_text''': ''''''}] )
| 256 |
'''simple docstring'''
from __future__ import annotations
class Node :
    def __init__( self , data=None ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''' )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list( elements_list ):
    if not elements_list:
        raise Exception("""The Elements List is empty""" )
    current = head = Node(elements_list[0] )
    for i in range(1, len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse( head_node ) -> None:
    if head_node is not None and isinstance(head_node, Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print("""Linked List:""" )
    print(linked_list )
    print("""Elements in Reverse:""" )
    print_reverse(linked_list )
if __name__ == "__main__":
    main()
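# Note: print_reverse recurses once per node, so lists longer than Python's
# recursion limit (sys.getrecursionlimit(), 1000 by default) raise RecursionError;
# an iterative variant would collect the values in a list and print it reversed.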
| 162 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    main()
| 52 |
'''simple docstring'''
def solution( n = 1000 ):
    """simple docstring"""
    fa , fb = 1, 1
    index = 2
    while True:
        digits = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            digits += 1
        if digits == n:
            break
    return index
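# Worked example: the first Fibonacci term with three digits is F(12) = 144,
# so solution(3) returns 12; Project Euler 25 asks for solution(1000).
assert solution(3 ) == 12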
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 52 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str , main_target: str ) -> tuple[str, float]:
    """simple docstring"""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_a: str , parent_b: str ) -> tuple[str, str]:
    """simple docstring"""
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str , genes: list[str] ) -> str:
    """simple docstring"""
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select(parent_a: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ) -> list[str]:
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
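# Worked example: with target "abc", the candidate "abd" matches at two of three
# positions, so its fitness score is 2.0.
assert evaluate('''abd''' , '''abc''' ) == ('''abd''', 2.0)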
def basic(target: str , genes: list[str] , debug: bool = True ) -> tuple[int, int, str]:
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation , population , target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
| 310 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>',
            'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_floataa( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local( self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 52 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
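# Migration sketch: downstream code only needs to switch to the replacement class;
# the preprocessing behaviour itself is unchanged.
#
#   from transformers import GLPNImageProcessor
#   image_processor = GLPNImageProcessor()  # instead of GLPNFeatureExtractor()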
| 52 | 1 |
'''simple docstring'''
class Node :
    def __init__( self , name , val ):
        self.name = name
        self.val = val
    def __str__( self ):
        return f'{self.__class__.__name__}({self.name}, {self.val})'
    def __lt__( self , other ):
        return self.val < other.val
class MinHeap :
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self , key ):
        return self.get_value(key )
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ):
        return idx * 2 + 1
    def get_right_child_idx( self , idx ):
        return idx * 2 + 2
    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx] , array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p] , self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ):
        return self.heap[0]
    def remove( self ):
        self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ):
        return len(self.heap ) == 0
    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
lowercase_ = Node("""R""", -1)
lowercase_ = Node("""B""", 6)
lowercase_ = Node("""A""", 3)
lowercase_ = Node("""X""", 1)
lowercase_ = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
lowercase_ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 58 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool , use_xla: bool ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size: int , sequence_length: int , vocab_size: int ) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark( Benchmark ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version( self ):
        """simple docstring"""
        return tf.__version__
def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_inference)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_train)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_inference)
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_train)
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(_snake_case , decoder_input_ids=_snake_case , training=_snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(_snake_case , training=_snake_case)
UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
UpperCAmelCase_ = model(_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase ( self : Any , _snake_case : Optional[Any]):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
timeit.repeat(_snake_case , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ = timeit.repeat(
_snake_case , repeat=self.args.repeat , number=10 , )
return min(_snake_case) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
def lowerCamelCase ( self : Dict , _snake_case : Callable[[], None]):
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''')
UpperCAmelCase_ = start_memory_tracing('''transformers''')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''')
UpperCAmelCase_ = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''')
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(_snake_case)
UpperCAmelCase_ = meminfo.used
UpperCAmelCase_ = Memory(_snake_case)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''')
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = measure_peak_memory_cpu(_snake_case)
UpperCAmelCase_ = Memory(_snake_case) if isinstance(_snake_case , _snake_case) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ = stop_memory_tracing(_snake_case)
if memory is None:
UpperCAmelCase_ = summary.total
else:
UpperCAmelCase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
return "N/A", None
| 51 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
    '''simple docstring'''
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        """simple docstring"""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    '''simple docstring'''
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset )
    assert next(iter(ds["train"] ) )
| 31 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
            UpperCamelCase__ = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        UpperCamelCase__ = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) ) | 31 | 1 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def __snake_case ( UpperCAmelCase_ : Any ):
# getting number of pixels in the image
lowerCamelCase_ ,lowerCamelCase_ = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCAmelCase_ ):
for j in range(UpperCAmelCase_ ):
            img[i][j] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
a_ : int = imread("""image_data/lena.jpg""", 1)
# convert to its negative
a_ : Any = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 55 |
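# A minimal in-memory check of the inversion above, assuming the function keeps its
# original name convert_to_negative (the __main__ block calls it that) and, with the
# in-place assignment fixed above, rewrites each pixel as 255 - value:
import numpy as np
demo = np.zeros((2, 2, 3), dtype=np.uint8)  # all-black 2x2 RGB image
demo[0][0] = [10, 20, 30]
negative = convert_to_negative(demo)
assert (negative[0][0] == [245, 235, 225]).all()
assert (negative[1][1] == [255, 255, 255]).all()  # black inverts to white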
'''simple docstring'''
from __future__ import annotations
def __snake_case ( UpperCAmelCase_ : int ):
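    # Trial division: strip out each smallest remaining factor; the reference sketch after this row spells it out.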
lowerCamelCase_ = 2
lowerCamelCase_ = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
            factors.append(i )
if n > 1:
        factors.append(n )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 1 |
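# A readable reference for the trial-division row above (identifier names here are
# hypothetical; the dataset row keeps its mangled ones):
def prime_factors(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1            # i does not divide n: try the next candidate
        else:
            n //= i           # strip one copy of the factor i
            factors.append(i)
    if n > 1:
        factors.append(n)     # whatever remains is itself prime
    return factors
assert prime_factors(12) == [2, 2, 3]
assert prime_factors(97) == [97]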
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''mra'''
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="absolute" , _UpperCAmelCase=4 , _UpperCAmelCase="full" , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__a : Any = vocab_size
__a : Tuple = max_position_embeddings
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Tuple = num_attention_heads
__a : Optional[Any] = intermediate_size
__a : Dict = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[Any] = attention_probs_dropout_prob
__a : List[Any] = initializer_range
__a : Optional[int] = type_vocab_size
__a : Union[str, Any] = layer_norm_eps
__a : int = position_embedding_type
__a : Optional[Any] = block_per_row
__a : Any = approx_mode
__a : Optional[Any] = initial_prior_first_n_blocks
__a : List[Any] = initial_prior_diagonal_n_blocks | 358 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=3 , _UpperCAmelCase=224 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = size if size is not None else {'''height''': 18, '''width''': 18}
__a : List[Any] = parent
__a : Dict = batch_size
__a : Dict = num_channels
__a : int = image_size
__a : Optional[Any] = min_resolution
__a : Optional[int] = max_resolution
__a : Dict = do_resize
__a : List[Any] = size
__a : int = do_normalize
__a : Optional[Any] = image_mean
__a : int = image_std
def _lowerCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : int = EfficientFormerImageProcessorTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processor
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Dict = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__a : Any = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processor
__a : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processor
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : str = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__a : Tuple = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , ) | 188 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = [True] * limit
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : List[Any] = True
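    # Sieve of Eratosthenes over odd candidates only; 2 is seeded into the result by hand below.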
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
SCREAMING_SNAKE_CASE__ : Tuple = i * 2
while index < limit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : int = index + i
SCREAMING_SNAKE_CASE__ : Tuple = [2]
for i in range(3 ,_snake_case ,2 ):
if is_prime[i]:
            primes.append(i )
return primes
def lowercase_ ( _snake_case = 1_000_000 ):
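    # Scan windows of consecutive primes, keeping the longest run whose sum is itself a prime below the ceiling.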
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prime_sieve(_snake_case )
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
    for i in range(len(primes ) ):
        for j in range(i + length ,len(primes ) ):
SCREAMING_SNAKE_CASE__ : int = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
SCREAMING_SNAKE_CASE__ : str = j - i
SCREAMING_SNAKE_CASE__ : str = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = 'sshleifer/student_marian_en_ro_6_1'
_SCREAMING_SNAKE_CASE = 'sshleifer/tiny-mbart'
@require_torch
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , ) -> int:
_A = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=lowerCAmelCase_ , num_train_epochs=1 , distributed=lowerCAmelCase_ , extra_args_str=lowerCAmelCase_ , predict_with_generate=lowerCAmelCase_ , do_train=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , do_predict=lowerCAmelCase_ , )
_A = TrainerState.load_from_json(os.path.join(lowerCAmelCase_ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_A = [log for log in logs if """eval_loss""" in log.keys()]
_A = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_A = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , lowerCAmelCase_ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCAmelCase ( self ) -> Optional[int]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCAmelCase ( self ) -> Dict:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ )
@require_torch_multi_gpu
def UpperCAmelCase ( self ) -> Dict:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> str:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> Dict:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowerCAmelCase_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase ( self ) -> Tuple:
self.run_seqaseq_quick(
distributed=lowerCAmelCase_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowerCAmelCase_ )
@require_apex
@require_torch_gpu
def UpperCAmelCase ( self ) -> int:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowerCAmelCase_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
_A = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
_A = experiments[experiment_id]
_A = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
_A = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowerCAmelCase_ , extra_args_str=data["""extra_args_str"""] )
_A = len(re.findall(lowerCAmelCase_ , cl.err ) )
self.assertEqual(lowerCAmelCase_ , data["""n_matches"""] )
@slow
def UpperCAmelCase ( self ) -> Dict:
_A = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=lowerCAmelCase_ , learning_rate=3E-4 , num_train_epochs=10 , distributed=lowerCAmelCase_ , )
# Check metrics
_A = TrainerState.load_from_json(os.path.join(lowerCAmelCase_ , """trainer_state.json""" ) ).log_history
_A = [log for log in logs if """eval_loss""" in log.keys()]
_A = eval_metrics[0]
_A = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , lowerCAmelCase_ )
# test if do_predict saves generations and metrics
_A = os.listdir(lowerCAmelCase_ )
        _A = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCAmelCase ( self ) -> Optional[Any]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowerCAmelCase_ ) -> Tuple[int, float]:
_A = """--skip_memory_metrics 0"""
_A = self.run_trainer(
max_len=1_28 , model_name=lowerCAmelCase_ , learning_rate=3E-4 , num_train_epochs=1 , optim=lowerCAmelCase_ , distributed=lowerCAmelCase_ , extra_args_str=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , do_predict=lowerCAmelCase_ , n_gpus_to_use=1 , )
# Check metrics
_A = TrainerState.load_from_json(Path(lowerCAmelCase_ , """trainer_state.json""" ) ).log_history
_A = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_A = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_A = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_A , _A , _A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_A , _A , _A = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_A = gpu_peak_mem_orig + gpu_alloc_mem_orig
_A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_A = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_A = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowerCAmelCase_ , lowerCAmelCase_ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
lowerCAmelCase_ , lowerCAmelCase_ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
lowerCAmelCase_ , lowerCAmelCase_ , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3E-3 , lowerCAmelCase_ = "adafactor" , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , ) -> str:
_A = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_A = self.get_auto_remove_tmp_dir()
_A = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(lowerCAmelCase_ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(lowerCAmelCase_ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
_A = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(lowerCAmelCase_ )}
'''.split()
_A = """
--do_predict
""".split()
_A = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_A = get_gpu_count()
_A = get_torch_dist_unique_port()
_A = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
_A = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCAmelCase_ , env=self.get_env() )
else:
_A = ["""run_translation.py"""] + args
with patch.object(lowerCAmelCase_ , """argv""" , lowerCAmelCase_ ):
main()
return output_dir
| 180 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function" )
UpperCamelCase_ = hex_num[0] == "-"
if is_negative:
UpperCamelCase_ = hex_num[1:]
try:
UpperCamelCase_ = int(UpperCamelCase_ , 16 )
except ValueError:
raise ValueError("Invalid value was passed to the function" )
UpperCamelCase_ = ""
while int_num > 0:
UpperCamelCase_ = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
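# Expected behaviour of the converter above, assuming its original name hex_to_bin
# (hypothetical — this row's def was renamed). It packs the binary digits into a
# base-10 int: hex_to_bin("AC") == 10101100 and hex_to_bin("-fc") == -11111100.
# Equivalent one-liner for a positive input:
assert int(bin(int("AC", 16))[2:]) == 10101100  # 0xAC == 172 == 0b10101100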
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
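    # Freeze the module: the mangled assignment below presumably stands for param.requires_grad = False.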
for param in module.parameters():
UpperCamelCase_ = False
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase_ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase_ = datetime.now()
UpperCamelCase_ = current_time.strftime("%H:%M:%S" )
return timestamp
| 328 | 0 |
from __future__ import annotations
__a = 1.6021E-19 # units = C
def a ( snake_case__: float , snake_case__: float , snake_case__: float , ):
'''simple docstring'''
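    # Solves sigma = q * n * mu: exactly one of the three arguments must be 0, and that one is computed from the other two.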
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 |
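# A quick numeric check of the solver above with hypothetical inputs — asking for the
# missing conductivity given n = 1e20 carriers/m^3 and mu = 0.05 m^2/(V*s):
ELECTRON_CHARGE = 1.6021e-19  # C, matching the constant defined in the row
sigma = 0.05 * 1e20 * ELECTRON_CHARGE
assert abs(sigma - 0.80105) < 1e-6  # S/m — the value returned when conductivity == 0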
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 56 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase : Any = "."
if __name__ == "__main__":
UpperCamelCase : Optional[int] = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
UpperCamelCase : Optional[int] = []
UpperCamelCase : Optional[int] = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCamelCase : Optional[int] = line.strip()
UpperCamelCase : Tuple = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCamelCase : Dict = "\n".join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 366 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase : List[Any] = {
"Salesforce/codegen-350M-mono": 2_0_4_8,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = CodeGenTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
if kwargs.pop('add_bos_token' , __UpperCAmelCase ):
__UpperCamelCase = kwargs.pop('name_or_path' , '' )
raise ValueError(
                'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = super().decode(
token_ids=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , **__UpperCAmelCase , )
if truncate_before_pattern is not None and len(__UpperCAmelCase ) > 0:
__UpperCamelCase = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
return decoded_text
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
def find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = pattern.search(__UpperCAmelCase , __UpperCAmelCase )
return m.start() if m else -1
        __UpperCamelCase = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCamelCase = list(re.finditer('^print' , __UpperCAmelCase , re.MULTILINE ) )
if len(__UpperCAmelCase ) > 1:
__UpperCamelCase = completion[: prints[1].start()]
__UpperCamelCase = list(re.finditer('^def' , __UpperCAmelCase , re.MULTILINE ) )
if len(__UpperCAmelCase ) > 1:
__UpperCamelCase = completion[: defs[1].start()]
__UpperCamelCase = 0
__UpperCamelCase = [
            pos for pos in [find_re(__UpperCAmelCase , terminal , __UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(__UpperCAmelCase ) > 0:
return completion[: min(__UpperCAmelCase )]
else:
return completion
| 263 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=None ) -> Tuple:
if subparsers is not None:
lowerCamelCase__ : Any = subparsers.add_parser('test' )
else:
lowerCamelCase__ : int = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
lowerCamelCase__ : List[str] = script_name
else:
lowerCamelCase__ : List[Any] = F"""--config_file={args.config_file} {script_name}"""
lowerCamelCase__ : str = ['accelerate-launch'] + test_args.split()
lowerCamelCase__ : Dict = execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def SCREAMING_SNAKE_CASE ( ) -> Any:
lowerCamelCase__ : Any = test_command_parser()
lowerCamelCase__ : List[Any] = parser.parse_args()
test_command(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 50 |
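# Rough shape of the command the `accelerate test` helper above ends up running
# (the config path is hypothetical; the script path comes from the installed package):
import os
script = os.path.join("accelerate", "test_utils", "scripts", "test_script.py")
cmd = ["accelerate-launch", "--config_file=/home/user/.cache/huggingface/accelerate/default_config.yaml", script]
# test_command() then hands this to execute_subprocess_async(cmd, env=os.environ.copy()).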
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = encoder_seq_length
lowerCamelCase__ : int = decoder_seq_length
# For common tests
lowerCamelCase__ : List[str] = self.decoder_seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = d_ff
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Any = initializer_factor
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[str] = pad_token_id
lowerCamelCase__ : List[str] = decoder_start_token_id
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Optional[int] = model(
input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
lowerCamelCase__ : Dict = result.last_hidden_state
lowerCamelCase__ : Any = result.past_key_values
lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
# first forward pass
lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids to build the extended sequence
lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
# select random slice
lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def A_ ( self : Tuple ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Tuple = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def A_ ( self : Tuple ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
lowerCamelCase__ : int = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Any = config_and_inputs[0]
lowerCamelCase__ : Any = UMTaForConditionalGeneration(UpperCAmelCase ).eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ):
lowerCamelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCamelCase__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase )
lowerCamelCase__ : Tuple = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCamelCase__ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCAmelCase , legacy=UpperCAmelCase )
lowerCamelCase__ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_tensors='pt' , padding=UpperCAmelCase ).input_ids
# fmt: off
lowerCamelCase__ : Any = torch.tensor(
[
[38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = model.generate(input_ids.to(UpperCAmelCase ) )
lowerCamelCase__ : List[Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 50 | 1 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
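# Euclid's algorithm: gcd(x, y) == gcd(y, x % y), and gcd(x, 0) == x.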
return x if y == 0 else greatest_common_divisor(_UpperCAmelCase ,x % y )
def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
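# lcm(x, y) == (x * y) // gcd(x, y).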
return (x * y) // greatest_common_divisor(_UpperCAmelCase ,_UpperCAmelCase )
def a_ ( _UpperCAmelCase : int = 20 ) -> int:
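# The smallest number evenly divisible by every integer in 1..n is lcm(1, 2, ..., n), built up iteratively.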
__snake_case : Optional[Any] = 1
for i in range(1 ,n + 1 ):
__snake_case : Optional[int] = lcm(_UpperCAmelCase ,_UpperCAmelCase )
return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 |
'''simple docstring'''
import math
def a_ ( _UpperCAmelCase : int ) -> list:
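# Odd-only sieve of Eratosthenes: returns all primes strictly below n.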
__snake_case : Optional[Any] = [True] * n
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
__snake_case : Optional[int] = i * 2
while index < n:
__snake_case : Union[str, Any] = False
__snake_case : int = index + i
__snake_case : Dict = [2]
for i in range(3 ,_UpperCAmelCase ,2 ):
if is_prime[i]:
primes.append(_UpperCAmelCase )
return primes
def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
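# For each consecutive prime pair (p, q), sums the numbers in (p*p, q*q] up to the limit
# that are divisible by exactly one of p and q.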
__snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
__snake_case : Tuple = prime_sieve(_UpperCAmelCase )
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
__snake_case : Optional[int] = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case : Optional[int] = primes[prime_index + 1]
__snake_case : Union[str, Any] = last_prime**2
__snake_case : Dict = next_prime**2
# Get numbers divisible by lps(current)
__snake_case : Optional[Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case : Optional[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case : List[str] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case : Dict = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__A : int = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__A : Union[str, Any] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__A : Any = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__A : List[Any] = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
__A : Union[str, Any] = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
__A : int = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
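# Interpret common truthy/falsy strings as booleans (argparse-friendly); bools pass through unchanged.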
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
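# Copy one ResNet block's in_layers/emb_layers/out_layers (and optional skip connection) weights
# over to the new checkpoint's naming scheme.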
lowerCAmelCase : int = checkpoint[f"{old_prefix}.in_layers.0.weight"]
lowerCAmelCase : List[str] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
lowerCAmelCase : str = checkpoint[f"{old_prefix}.in_layers.2.bias"]
lowerCAmelCase : int = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
lowerCAmelCase : Tuple = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
lowerCAmelCase : Any = checkpoint[f"{old_prefix}.out_layers.0.weight"]
lowerCAmelCase : List[Any] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
lowerCAmelCase : List[str] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
lowerCAmelCase : str = checkpoint[f"{old_prefix}.skip_connection.weight"]
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
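# Split the fused qkv projection into separate q/k/v weights and squeeze away the trailing 1x1 conv dimensions.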
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : str = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0 )
lowerCAmelCase : int = checkpoint[f"{old_prefix}.norm.weight"]
lowerCAmelCase : List[Any] = checkpoint[f"{old_prefix}.norm.bias"]
lowerCAmelCase : List[Any] = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Tuple = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Union[str, Any] = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Dict = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : List[Any] = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : str = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Tuple = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
lowerCAmelCase : Optional[Any] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
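# Translate the original consistency-model UNet checkpoint, block by block, into a diffusers-style state dict.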
lowerCAmelCase : Union[str, Any] = torch.load(_UpperCAmelCase, map_location='cpu' )
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Any = checkpoint['time_embed.0.weight']
lowerCAmelCase : Any = checkpoint['time_embed.0.bias']
lowerCAmelCase : List[Any] = checkpoint['time_embed.2.weight']
lowerCAmelCase : Tuple = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase : str = checkpoint['label_emb.weight']
lowerCAmelCase : Optional[Any] = checkpoint['input_blocks.0.0.weight']
lowerCAmelCase : Optional[Any] = checkpoint['input_blocks.0.0.bias']
lowerCAmelCase : int = unet_config['down_block_types']
lowerCAmelCase : str = unet_config['layers_per_block']
lowerCAmelCase : Union[str, Any] = unet_config['attention_head_dim']
lowerCAmelCase : Any = unet_config['block_out_channels']
lowerCAmelCase : Dict = 1
lowerCAmelCase : List[str] = channels_list[0]
for i, layer_type in enumerate(_UpperCAmelCase ):
lowerCAmelCase : List[str] = channels_list[i]
lowerCAmelCase : Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = f"down_blocks.{i}.resnets.{j}"
lowerCAmelCase : List[Any] = f"input_blocks.{current_layer}.0"
lowerCAmelCase : int = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase : List[Any] = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, has_skip=_UpperCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_UpperCAmelCase ):
lowerCAmelCase : List[Any] = f"down_blocks.{i}.resnets.{j}"
lowerCAmelCase : Union[str, Any] = f"input_blocks.{current_layer}.0"
lowerCAmelCase : Dict = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase : List[str] = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, has_skip=_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = f"down_blocks.{i}.attentions.{j}"
lowerCAmelCase : List[str] = f"input_blocks.{current_layer}.1"
lowerCAmelCase : Any = convert_attention(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
current_layer += 1
if i != len(_UpperCAmelCase ) - 1:
lowerCAmelCase : List[Any] = f"down_blocks.{i}.downsamplers.0"
lowerCAmelCase : List[str] = f"input_blocks.{current_layer}.0"
lowerCAmelCase : Optional[Any] = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
current_layer += 1
lowerCAmelCase : List[str] = current_channels
# hardcoded the mid-block for now
lowerCAmelCase : Tuple = 'mid_block.resnets.0'
lowerCAmelCase : Dict = 'middle_block.0'
lowerCAmelCase : Any = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = 'mid_block.attentions.0'
lowerCAmelCase : Optional[int] = 'middle_block.1'
lowerCAmelCase : List[Any] = convert_attention(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : List[Any] = 'mid_block.resnets.1'
lowerCAmelCase : List[Any] = 'middle_block.2'
lowerCAmelCase : Tuple = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(_UpperCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase : Optional[Any] = f"up_blocks.{i}.resnets.{j}"
lowerCAmelCase : Optional[int] = f"output_blocks.{current_layer}.0"
lowerCAmelCase : Any = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, has_skip=_UpperCAmelCase )
current_layer += 1
if i != len(_UpperCAmelCase ) - 1:
lowerCAmelCase : str = f"up_blocks.{i}.upsamplers.0"
lowerCAmelCase : Optional[Any] = f"output_blocks.{current_layer-1}.1"
lowerCAmelCase : Dict = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase : Any = f"up_blocks.{i}.resnets.{j}"
lowerCAmelCase : Dict = f"output_blocks.{current_layer}.0"
lowerCAmelCase : Optional[int] = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, has_skip=_UpperCAmelCase )
lowerCAmelCase : Optional[int] = f"up_blocks.{i}.attentions.{j}"
lowerCAmelCase : List[str] = f"output_blocks.{current_layer}.1"
lowerCAmelCase : Optional[Any] = convert_attention(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
current_layer += 1
if i != len(_UpperCAmelCase ) - 1:
lowerCAmelCase : Optional[Any] = f"up_blocks.{i}.upsamplers.0"
lowerCAmelCase : Union[str, Any] = f"output_blocks.{current_layer-1}.2"
lowerCAmelCase : List[str] = convert_resnet(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : List[str] = checkpoint['out.0.weight']
lowerCAmelCase : List[str] = checkpoint['out.0.bias']
lowerCAmelCase : List[Any] = checkpoint['out.2.weight']
lowerCAmelCase : str = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__A : Optional[int] = parser.parse_args()
__A : List[Any] = strabool(args.class_cond)
__A : Dict = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
__A : Dict = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__A : Optional[int] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__A : Any = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
__A : List[str] = None
__A : Any = con_pt_to_diffuser(args.unet_path, unet_config)
__A : List[str] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__A : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__A : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__A : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
__A : Union[str, Any] = CMStochasticIterativeScheduler(**scheduler_config)
__A : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 138 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__A : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
lowerCAmelCase_ : int = 1_0000
lowerCAmelCase_ : Optional[List[str]] = None
lowerCAmelCase_ : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
lowerCAmelCase_ : List[Any] = ParquetConfig
def lowercase__ ( self : Tuple ):
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Dict ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
lowerCAmelCase : Any = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
lowerCAmelCase : int = data_files
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase : Dict = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
lowerCAmelCase : Any = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase : Dict = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCAmelCase_ ):
with open(UpperCAmelCase_ , 'rb' ) as f:
lowerCAmelCase : int = datasets.Features.from_arrow_schema(pq.read_schema(UpperCAmelCase_ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'files': files} ) )
return splits
def lowercase__ ( self : Any , UpperCAmelCase_ : pa.Table ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase : Union[str, Any] = table_cast(UpperCAmelCase_ , self.info.features.arrow_schema )
return pa_table
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
with open(UpperCAmelCase_ , 'rb' ) as f:
lowerCAmelCase : Optional[Any] = pq.ParquetFile(UpperCAmelCase_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCAmelCase : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(UpperCAmelCase_ )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}" )
raise
| 138 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a__ ( A__ ):
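# Parse a job's start/end timestamps and record its wall-clock duration in minutes.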
SCREAMING_SNAKE_CASE_ : Tuple = {}
SCREAMING_SNAKE_CASE_ : Any = job['started_at']
SCREAMING_SNAKE_CASE_ : Any = job['completed_at']
SCREAMING_SNAKE_CASE_ : Optional[int] = date_parser.parse(A__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = date_parser.parse(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = round((end_datetime - start_datetime).total_seconds() / 60.0 )
SCREAMING_SNAKE_CASE_ : Dict = start
SCREAMING_SNAKE_CASE_ : Optional[int] = end
SCREAMING_SNAKE_CASE_ : Optional[Any] = duration_in_min
return job_info
def a__ ( A__, A__=None ):
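# Query the GitHub API for every job in a workflow run (100 per page, following pagination)
# and map each job name to its timing info.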
SCREAMING_SNAKE_CASE_ : List[str] = None
if token is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
SCREAMING_SNAKE_CASE_ : str = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE_ : Tuple = requests.get(A__, headers=A__ ).json()
SCREAMING_SNAKE_CASE_ : int = {}
try:
job_time.update({job['name']: extract_time_from_single_job(A__ ) for job in result['jobs']} )
SCREAMING_SNAKE_CASE_ : int = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = requests.get(url + F'''&page={i + 2}''', headers=A__ ).json()
job_time.update({job['name']: extract_time_from_single_job(A__ ) for job in result['jobs']} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
lowerCAmelCase__ : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
lowerCAmelCase__ : str =parser.parse_args()
lowerCAmelCase__ : List[Any] =get_job_time(args.workflow_run_id)
lowerCAmelCase__ : List[Any] =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
| 358 |
from __future__ import annotations
def a__ ( A__ ):
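# A sequence has no duplicates iff converting it to a set keeps its length.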
return len(set(A__ ) ) == len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = inspect.getfile(accelerate.test_utils )
__lowercase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__lowercase = test_metrics
@require_cpu
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
__lowercase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
| 210 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__a : List[Any] = logging.getLogger(__name__)
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=lowercase , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=lowercase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=lowercase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=lowercase , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=lowercase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=lowercase , type=lowercase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=lowercase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=lowercase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__lowercase = parser.parse_args()
return args
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
def fn(lowercase ):
return tokenizer(examples['''text'''] )
return fn
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__lowercase = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__lowercase = tf.train.Features(feature=lowercase )
__lowercase = tf.train.Example(features=lowercase )
__lowercase = example.SerializeToString()
records.append(lowercase )
return records
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__lowercase = min(len(lowercase ) , args.limit )
__lowercase = dataset.select(range(lowercase ) )
print(F"Limiting the dataset to {args.limit} entries." )
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowercase = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
__lowercase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__lowercase = tokenize_function(lowercase )
__lowercase = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase ):
# Concatenate all texts.
__lowercase = {k: sum(examples[k] , [] ) for k in examples.keys()}
__lowercase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__lowercase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__lowercase = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__lowercase = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 )
__lowercase = 0
__lowercase = 0
for shard in range(0 , len(lowercase ) , args.shard_size ):
__lowercase = grouped_dataset[shard : shard + args.shard_size]
__lowercase = len(dataset_snapshot['''input_ids'''] )
__lowercase = os.path.join(lowercase , F"dataset-{shard_count}-{records_containing}.tfrecord" )
__lowercase = get_serialized_examples(lowercase )
with tf.io.TFRecordWriter(lowercase ) as out_file:
for i in range(len(lowercase ) ):
__lowercase = serialized_examples[i]
out_file.write(lowercase )
print('''Wrote file {} containing {} records'''.format(lowercase , lowercase ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(F"Total {args.split} records: {total_records}" , file=lowercase )
if __name__ == "__main__":
__a : Optional[Any] = parse_args()
main(args)
| 210 | 1 |
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
"""simple docstring"""
if not (isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )):
raise ValueError('''longest_common_substring() takes two strings for inputs''' )
a :Optional[int] = len(UpperCAmelCase_ )
a :str = len(UpperCAmelCase_ )
a :List[Any] = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
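# dp[i][j] is the length of the longest common suffix of the first i and first j characters of the two strings.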
a :Dict = 0
a :Union[str, Any] = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
a :Union[str, Any] = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
a :Dict = i
a :Optional[Any] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case : Any = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
inspect_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
a :List[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCAmelCase_ )
assert "__pycache__" not in os.listdir(UpperCAmelCase_ )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ):
"""simple docstring"""
inspect_metric(UpperCAmelCase_ , UpperCAmelCase_ )
a :Dict = path + '''.py'''
assert script_name in os.listdir(UpperCAmelCase_ )
assert "__pycache__" not in os.listdir(UpperCAmelCase_ )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
a :List[str] = get_dataset_config_info(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
with pytest.raises(UpperCAmelCase_ ):
get_dataset_config_info(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ):
"""simple docstring"""
a :List[str] = get_dataset_config_names(UpperCAmelCase_ )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
a :Optional[int] = get_dataset_infos(UpperCAmelCase_ )
assert list(infos.keys() ) == expected_configs
a :Union[str, Any] = expected_configs[0]
assert expected_config in infos
a :List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
a :Union[str, Any] = get_dataset_infos(UpperCAmelCase_ )
assert expected_config in infos
a :int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str ):
"""simple docstring"""
with pytest.raises(UpperCAmelCase_ ):
get_dataset_split_names(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
| 281 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : str = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def _lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str ) -> str:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
_SCREAMING_SNAKE_CASE =TOKENIZER_CLASSES
else:
_SCREAMING_SNAKE_CASE ={tokenizer_name: getattr(__UpperCamelCase , tokenizer_name + 'Fast' )}
logger.info(f"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
_SCREAMING_SNAKE_CASE =TOKENIZER_CLASSES[tokenizer_name]
_SCREAMING_SNAKE_CASE =True
if checkpoint_name is None:
_SCREAMING_SNAKE_CASE =list(tokenizer_class.max_model_input_sizes.keys() )
else:
_SCREAMING_SNAKE_CASE =[checkpoint_name]
logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
_SCREAMING_SNAKE_CASE =tokenizer_class.from_pretrained(__UpperCamelCase , force_download=__UpperCamelCase )
# Save fast tokenizer
logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =checkpoint.split('/' )
_SCREAMING_SNAKE_CASE =os.path.join(__UpperCamelCase , __UpperCamelCase )
elif add_prefix:
_SCREAMING_SNAKE_CASE =checkpoint
_SCREAMING_SNAKE_CASE =dump_path
else:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =dump_path
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_SCREAMING_SNAKE_CASE =list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_SCREAMING_SNAKE_CASE =file_path.split(__UpperCamelCase )[-1][0]
if next_char == "/":
_SCREAMING_SNAKE_CASE =os.path.join(__UpperCamelCase , __UpperCamelCase )
_SCREAMING_SNAKE_CASE =None
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
_SCREAMING_SNAKE_CASE =tokenizer.save_pretrained(
__UpperCamelCase , legacy_format=__UpperCamelCase , filename_prefix=__UpperCamelCase )
logger.info(f"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__UpperCamelCase )
logger.info(f"=> removing {file_name}" )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 47 |
'''simple docstring'''
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> bool:
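# A color fits a vertex if no neighbour in its adjacency-matrix row already carries that color.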
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__UpperCamelCase ) )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> bool:
# Base Case
if index == len(__UpperCamelCase ):
return True
# Recursive Step
for i in range(__UpperCamelCase ):
if valid_coloring(graph[index] , __UpperCamelCase , __UpperCamelCase ):
# Color current vertex
UpperCamelCase = i
# Validate coloring
if util_color(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index + 1 ):
return True
# Backtrack
UpperCamelCase = -1
return False
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> list[int]:
UpperCamelCase = [-1] * len(__UpperCamelCase )
if util_color(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , 0 ):
return colored_vertices
return []
| 321 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__lowercase: Optional[Any] = data_utils.TransfoXLTokenizer
__lowercase: Union[str, Any] = data_utils.TransfoXLCorpus
__lowercase: Any = data_utils
__lowercase: Optional[Any] = data_utils
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
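# Convert either a pre-processed Transformer-XL corpus pickle or a TF checkpoint into PyTorch artifacts.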
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_UpperCamelCase , "rb" ) as fp:
UpperCamelCase__ = pickle.load(_UpperCamelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase__ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
UpperCamelCase__ = corpus.vocab.__dict__
torch.save(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , _UpperCamelCase )
UpperCamelCase__ = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(_UpperCamelCase , _UpperCamelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase__ = TransfoXLConfig()
else:
UpperCamelCase__ = TransfoXLConfig.from_json_file(_UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
UpperCamelCase__ = TransfoXLLMHeadModel(_UpperCamelCase )
UpperCamelCase__ = load_tf_weights_in_transfo_xl(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
UpperCamelCase__ = os.path.join(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F'Save PyTorch model to {os.path.abspath(_UpperCamelCase )}' )
torch.save(model.state_dict() , _UpperCamelCase )
print(F'Save configuration file to {os.path.abspath(_UpperCamelCase )}' )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase: Any = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__lowercase: Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 31 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
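# Greedy (Huffman-style) merging: repeatedly combine the two cheapest files and accumulate each merge's cost.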
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :int = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = '''mobilenet_v1'''
def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=224 , __SCREAMING_SNAKE_CASE : Any=1.0 , __SCREAMING_SNAKE_CASE : List[Any]=8 , __SCREAMING_SNAKE_CASE : Dict="relu6" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=0.9_99 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Dict=0.0_01 , **__SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''')
__a = num_channels
__a = image_size
__a = depth_multiplier
__a = min_depth
__a = hidden_act
__a = tf_padding
__a = classifier_dropout_prob
__a = initializer_range
__a = layer_norm_eps
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return OrderedDict([('''pixel_values''', {0: '''batch'''})])
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})])
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return 1E-4
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
__A = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
__A = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
__A = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__SCREAMING_SNAKE_CASE : Tuple = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool , UpperCAmelCase_ : str = None , UpperCAmelCase_ : list = None ) ->List[Any]:
"""simple docstring"""
snake_case_ = None
snake_case_ = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
snake_case_ = os.path.abspath("""examples""" )
for item in os.listdir(UpperCAmelCase_ ):
if item not in EXCLUDE_EXAMPLES:
snake_case_ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if os.path.isfile(UpperCAmelCase_ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase_ , feature_script=UpperCAmelCase_ , tested_section="""main()""" if parser_only else """training_function()""" , ):
snake_case_ = compare_against_test(
os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = """\n""".join(UpperCAmelCase_ )
if special_strings is not None:
for string in special_strings:
snake_case_ = diff.replace(UpperCAmelCase_ , """""" )
self.assertEqual(UpperCAmelCase_ , """""" )
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , UpperCAmelCase_ )
self.one_complete_example("""complete_nlp_example.py""" , UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
snake_case_ = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.one_complete_example("""complete_cv_example.py""" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = False
@classmethod
def lowerCAmelCase ( cls : Any ) ->List[str]:
"""simple docstring"""
super().setUpClass()
snake_case_ = tempfile.mkdtemp()
snake_case_ = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
snake_case_ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def lowerCAmelCase ( cls : List[str] ) ->int:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
snake_case_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
snake_case_ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
snake_case_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ )
self.assertNotIn("""epoch 0:""" , UpperCAmelCase_ )
self.assertIn("""epoch 1:""" , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
snake_case_ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
snake_case_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ )
if torch.cuda.is_available():
snake_case_ = torch.cuda.device_count()
else:
snake_case_ = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , UpperCAmelCase_ )
self.assertIn("""epoch 1:""" , UpperCAmelCase_ )
else:
self.assertIn("""epoch 0:""" , UpperCAmelCase_ )
self.assertIn("""epoch 1:""" , UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
snake_case_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ )
snake_case_ = re.findall("""({.+})""" , UpperCAmelCase_ )
snake_case_ = [r for r in results if """accuracy""" in r][-1]
snake_case_ = ast.literal_eval(UpperCAmelCase_ )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
snake_case_ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , """tracking""" ) ) )
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 233 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]:
if attention_mask is None:
snake_case_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
if decoder_head_mask is None:
snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
if cross_attn_head_mask is None:
snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __A :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[Any]="relu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[str]=20 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Optional[Any]=0 , ) ->Dict:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.eos_token_id # Eos Token
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad tokens in between:
        # for M2M100 the position_ids are prepared such that all pad tokens get
        # pos id = 2 and the rest lie in 2..seq_length, where the seq_length here is
        # seq_length - num_pad_tokens. When using past, there is no way of knowing
        # whether the past input ids contained pad tokens, which results in an
        # incorrect seq_length and, in turn, position_ids that are off by
        # num_pad_tokens in the past input.
snake_case_ = input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case_ = self.get_config()
snake_case_ = prepare_mam_aaa_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = MaMaaaModel(config=UpperCAmelCase_ ).get_decoder().to(UpperCAmelCase_ ).eval()
snake_case_ = inputs_dict["""input_ids"""]
snake_case_ = inputs_dict["""attention_mask"""]
snake_case_ = inputs_dict["""head_mask"""]
# first forward pass
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
snake_case_ , snake_case_ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and next attention_mask
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )["""last_hidden_state"""]
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[
"""last_hidden_state"""
]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 ) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) ->int:
"""simple docstring"""
snake_case_ = MaMaaaModel(config=UpperCAmelCase_ ).to(UpperCAmelCase_ ).eval()
snake_case_ = model(**UpperCAmelCase_ )
snake_case_ = outputs.encoder_last_hidden_state
snake_case_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase_ )
snake_case_ = MaMaaaEncoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase_ )
snake_case_ = MaMaaaDecoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__lowercase: Union[str, Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__lowercase: Tuple = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__lowercase: Dict = True
__lowercase: List[Any] = True
__lowercase: Union[str, Any] = False
__lowercase: Optional[int] = False
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) ->str:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
snake_case_ = MaMaaaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ )
snake_case_ , snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not self.is_encoder_decoder:
snake_case_ = inputs["""input_ids"""]
del inputs["input_ids"]
else:
snake_case_ = inputs["""input_ids"""]
snake_case_ = inputs.get("""decoder_input_ids""" , UpperCAmelCase_ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , UpperCAmelCase_ )
snake_case_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case_ = wte(UpperCAmelCase_ )
else:
snake_case_ = wte(UpperCAmelCase_ )
snake_case_ = wte(UpperCAmelCase_ )
with torch.no_grad():
model(**UpperCAmelCase_ )[0]
def lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = input_dict["""input_ids"""]
snake_case_ = input_ids.ne(1 ).to(UpperCAmelCase_ )
snake_case_ = MaMaaaForConditionalGeneration(UpperCAmelCase_ ).eval().to(UpperCAmelCase_ )
if torch_device == "cuda":
model.half()
model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
model.generate(num_beams=4 , do_sample=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , num_return_sequences=3 )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
snake_case_ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ )
snake_case_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
snake_case_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
snake_case_ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )[0]
snake_case_ = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
snake_case_ = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCAmelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ )
# change to intended input
snake_case_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
snake_case_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
snake_case_ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )[0]
snake_case_ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
snake_case_ = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCAmelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ )
snake_case_ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
snake_case_ = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
        # The inputs below test that we don't add any hypotheses outside of the top n_beams
snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" )
snake_case_ = model.generate(
input_ids=dct["""input_ids"""].to(UpperCAmelCase_ ) , attention_mask=dct["""attention_mask"""].to(UpperCAmelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
snake_case_ = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
snake_case_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
assert generated == expected_en
| 233 | 1 |
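The helper `prepare_mam_aaa_inputs_dict` above derives every mask from the token ids rather than taking masks as inputs. A minimal sketch of that pattern, with a pad id and shapes chosen purely for illustration:

import torch

pad_token_id = 1  # assumed value, for illustration only
input_ids = torch.tensor([[5, 42, 7, pad_token_id, pad_token_id]])

# True where the token is real, False where it is padding
attention_mask = input_ids.ne(pad_token_id)
print(attention_mask)  # tensor([[ True,  True,  True, False, False]])

# head masks default to all-ones, meaning no attention heads are masked out
num_layers, num_heads = 2, 4
head_mask = torch.ones(num_layers, num_heads)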
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE_ : str = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE_ : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE_ : Dict = False
@property
def A ( self : Any ) -> Any:
return 32
@property
def A ( self : Optional[int] ) -> Any:
return 32
@property
def A ( self : Dict ) -> int:
return self.time_input_dim
@property
def A ( self : Tuple ) -> str:
return self.time_input_dim * 4
@property
def A ( self : Any ) -> str:
return 1_00
@property
def A ( self : str ) -> List[str]:
torch.manual_seed(0 )
lowercase_ : List[Any] = {
'''in_channels''': 8,
            # out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase_ : Dict = UNetaDConditionModel(**A )
return model
@property
def A ( self : Optional[Any] ) -> Union[str, Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
lowercase_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ : Tuple = self.dummy_unet
lowercase_ : int = self.dummy_movq
lowercase_ : List[Any] = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase_ : str = DDIMScheduler(**A )
lowercase_ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self : Optional[int] , A : int , A : List[str]=0 ) -> int:
lowercase_ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A ) ).to(A )
lowercase_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A )
# create init_image
lowercase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((2_56, 2_56) )
# create hint
lowercase_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
if str(A ).startswith('''mps''' ):
lowercase_ : Optional[Any] = torch.manual_seed(A )
else:
lowercase_ : List[Any] = torch.Generator(device=A ).manual_seed(A )
lowercase_ : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def A ( self : Any ) -> List[Any]:
lowercase_ : List[str] = '''cpu'''
lowercase_ : Any = self.get_dummy_components()
lowercase_ : Any = self.pipeline_class(**A )
lowercase_ : int = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowercase_ : Dict = pipe(**self.get_dummy_inputs(A ) )
lowercase_ : str = output.images
lowercase_ : int = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
lowercase_ : Dict = image[0, -3:, -3:, -1]
lowercase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : List[str] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Tuple ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any ) -> Optional[int]:
lowercase_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
lowercase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase_ : Optional[int] = init_image.resize((5_12, 5_12) )
lowercase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase_ : Optional[int] = torch.from_numpy(np.array(A ) ).float() / 255.0
lowercase_ : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowercase_ : Optional[Any] = '''A robot, 4k photo'''
lowercase_ : Tuple = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(A )
lowercase_ : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
lowercase_ : int = pipeline.to(A )
pipeline.set_progress_bar_config(disable=A )
lowercase_ : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase_ , lowercase_ : int = pipe_prior(
A , image=A , strength=0.85 , generator=A , negative_prompt='''''' , ).to_tuple()
lowercase_ : str = pipeline(
image=A , image_embeds=A , negative_image_embeds=A , hint=A , generator=A , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , )
lowercase_ : Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(A , A )
| 33 |
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0_0 ):
lowercase_ , lowercase_ : str = 1, 1
lowercase_ : List[str] = 2
while True:
lowercase_ : Tuple = 0
lowercase_ : List[Any] = fa + fa
lowercase_ , lowercase_ : Optional[int] = fa, f
index += 1
        for _ in str(f ):  # count the digits of the newest Fibonacci term
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33 | 1 |
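The row above is a Project Euler 25 style solution (index of the first Fibonacci term with n digits) whose variable names were mangled. A cleaned-up sketch of the same digit-counting loop, with names of my own:

def first_fib_index_with_digits(n: int = 1000) -> int:
    # walk the Fibonacci sequence until the newest term has n digits
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit term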
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''M-CLIP'''
def __init__( self : List[str] , UpperCAmelCase_ : str=1_024 , UpperCAmelCase_ : Optional[Any]=768 , **UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Any = transformerDimSize
__UpperCAmelCase : Union[str, Any] = imageDimSize
super().__init__(**UpperCAmelCase_ )
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MCLIPConfig
def __init__( self : Optional[Any] , UpperCAmelCase_ : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
__UpperCAmelCase : int = XLMRobertaModel(UpperCAmelCase_ )
__UpperCAmelCase : str = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.transformer(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
__UpperCAmelCase : List[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(UpperCAmelCase_ ), embs
| 368 |
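The M-CLIP forward pass above mean-pools the transformer output under the attention mask before applying the linear projection. A self-contained sketch of just that pooling step, with toy shapes:

import torch

batch, seq_len, dim = 2, 4, 8
embs = torch.randn(batch, seq_len, dim)
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])

# zero out padded positions, then average over the real tokens only
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8])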
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ : Optional[Any] = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __UpperCamelCase ( _UpperCAmelCase ):
    # 1. In HF T5 the keys look like block.{x}.layer.{y}, which corresponds to
    # layers_{x} in the original T5X model
__UpperCAmelCase : List[str] = list(s_dict.keys() )
for key in keys:
__UpperCAmelCase : int = R".*/layers_(\d+)"
__UpperCAmelCase : List[str] = key
if re.match(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : Optional[int] = re.sub(R"layers_(\d+)", R"block/\1/layer", _UpperCAmelCase )
__UpperCAmelCase : Any = R"(encoder|decoder)\/"
if re.match(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : List[Any] = re.match(_UpperCAmelCase, _UpperCAmelCase ).groups()
if groups[0] == "encoder":
__UpperCAmelCase : Optional[Any] = re.sub(R"/mlp/", R"/1/mlp/", _UpperCAmelCase )
__UpperCAmelCase : List[Any] = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", _UpperCAmelCase )
elif groups[0] == "decoder":
__UpperCAmelCase : List[Any] = re.sub(R"/mlp/", R"/2/mlp/", _UpperCAmelCase )
__UpperCAmelCase : Any = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", _UpperCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__UpperCAmelCase : List[str] = new_key.replace(_UpperCAmelCase, _UpperCAmelCase )
print(F"{key} -> {new_key}" )
__UpperCAmelCase : Any = s_dict.pop(_UpperCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Tuple = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__UpperCAmelCase : Any = s_dict[key].shape[0]
__UpperCAmelCase : str = s_dict[key]
for idx in range(_UpperCAmelCase ):
__UpperCAmelCase : Optional[Any] = expert_weihts[idx]
print(F"{key} -> {key.replace('expert/', 'nested fstring' )}" )
s_dict.pop(_UpperCAmelCase )
return s_dict
lowerCAmelCase__ : Optional[Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(_UpperCAmelCase, "r" ) as f:
__UpperCAmelCase : List[Any] = f.read()
__UpperCAmelCase : Union[str, Any] = re.findall(R"(.*) = ([0-9.]*)", _UpperCAmelCase )
__UpperCAmelCase : Dict = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__UpperCAmelCase : Tuple = float(_UpperCAmelCase ) if "." in value else int(_UpperCAmelCase )
__UpperCAmelCase : str = re.findall(R"(.*activations) = \(\'(.*)\',\)", _UpperCAmelCase )[0]
__UpperCAmelCase : int = str(activation[1] )
__UpperCAmelCase : int = num_experts
__UpperCAmelCase : List[str] = SwitchTransformersConfig(**_UpperCAmelCase )
return config
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase="./", _UpperCAmelCase=8 ):
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}" )
__UpperCAmelCase : Dict = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
if gin_file is not None:
__UpperCAmelCase : int = convert_gin_to_config(_UpperCAmelCase, _UpperCAmelCase )
else:
__UpperCAmelCase : int = SwitchTransformersConfig.from_pretrained(_UpperCAmelCase )
__UpperCAmelCase : Any = SwitchTransformersForConditionalGeneration(_UpperCAmelCase )
__UpperCAmelCase : str = flax_params["target"]
__UpperCAmelCase : Any = flatten_dict(_UpperCAmelCase, sep="/" )
__UpperCAmelCase : Optional[Any] = rename_keys(_UpperCAmelCase )
__UpperCAmelCase : Any = unflatten_dict(_UpperCAmelCase, sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_UpperCAmelCase, _UpperCAmelCase )
print(F"Save PyTorch model to {pytorch_dump_path}" )
pt_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowerCAmelCase__ : int = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 37 | 0 |
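The renaming pass above turns T5X checkpoint keys into HF-style paths with one `re.sub` for the layer index plus literal substring replacements. A toy sketch of the same idea; the mapping entries here are picked for illustration, not the full table:

import re

old_key = "encoder/layers_3/attention/query/kernel"
new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", old_key)
for old, new in {"/attention/": "/0/SelfAttention/", "query": "q"}.items():
    new_key = new_key.replace(old, new)
print(new_key)  # encoder/block/3/layer/0/SelfAttention/q/kernel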
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar('KEY')
_snake_case = TypeVar('VAL')
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class UpperCamelCase ( Generic[KEY, VAL] ):
UpperCamelCase : KEY
UpperCamelCase : VAL
class UpperCamelCase ( _Item ):
def __init__( self : Optional[int] ) -> None:
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __bool__( self : List[str] ) -> bool:
return False
_snake_case = _DeletedItem()
class UpperCamelCase ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : float = 0.7_5 ) -> None:
_a : List[Any] = initial_block_size
_a : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_a : Optional[int] = capacity_factor
_a : int = 0
def _lowercase ( self : List[str] , UpperCAmelCase__ : KEY ) -> int:
return hash(UpperCAmelCase__ ) % len(self._buckets )
def _lowercase ( self : Dict , UpperCAmelCase__ : int ) -> int:
return (ind + 1) % len(self._buckets )
def _lowercase ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : KEY , UpperCAmelCase__ : VAL ) -> bool:
_a : Tuple = self._buckets[ind]
if not stored:
_a : Any = _Item(UpperCAmelCase__ , UpperCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
_a : List[Any] = _Item(UpperCAmelCase__ , UpperCAmelCase__ )
return True
else:
return False
def _lowercase ( self : int ) -> bool:
_a : Optional[int] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_a : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _lowercase ( self : List[str] , UpperCAmelCase__ : int ) -> None:
_a : Any = self._buckets
_a : Union[str, Any] = [None] * new_size
_a : str = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _lowercase ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def _lowercase ( self : Dict ) -> None:
self._resize(len(self._buckets ) // 2 )
def _lowercase ( self : Dict , UpperCAmelCase__ : KEY ) -> Iterator[int]:
_a : str = self._get_bucket_index(UpperCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
_a : Union[str, Any] = self._get_next_ind(UpperCAmelCase__ )
def _lowercase ( self : Any , UpperCAmelCase__ : KEY , UpperCAmelCase__ : VAL ) -> None:
for ind in self._iterate_buckets(UpperCAmelCase__ ):
if self._try_set(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
break
def __setitem__( self : Union[str, Any] , UpperCAmelCase__ : KEY , UpperCAmelCase__ : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(UpperCAmelCase__ , UpperCAmelCase__ )
def __delitem__( self : List[str] , UpperCAmelCase__ : KEY ) -> None:
for ind in self._iterate_buckets(UpperCAmelCase__ ):
_a : Union[str, Any] = self._buckets[ind]
if item is None:
raise KeyError(UpperCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
_a : Any = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Any , UpperCAmelCase__ : KEY ) -> VAL:
for ind in self._iterate_buckets(UpperCAmelCase__ ):
_a : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(UpperCAmelCase__ )
def __len__( self : int ) -> int:
return self._len
def __iter__( self : int ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : Tuple ) -> str:
_a : int = """ ,""".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 294 |
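The map above is an open-addressing hash table: a key hashes to a home bucket, collisions step linearly to the next bucket, and deletions leave a `_deleted` tombstone so later probes keep walking. A minimal self-contained sketch of the probe sequence it iterates (simplified, not the class's exact API):

def probe_indices(key, size):
    # linear probing: start at the home bucket, then step with wrap-around
    ind = hash(key) % size
    for _ in range(size):
        yield ind
        ind = (ind + 1) % size

print(list(probe_indices("a", 8)))  # visits each of the 8 buckets exactly once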
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict:
if k in (0.0_4, 0.0_6):
_a : List[str] = k
_a : List[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Dict ) -> str:
return str(self.k )
def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]:
_a : Dict = cva.imread(UpperCAmelCase__ , 0 )
_a , _a : List[Any] = img.shape
_a : list[list[int]] = []
_a : List[Any] = img.copy()
_a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB )
_a , _a : Any = np.gradient(UpperCAmelCase__ )
_a : Tuple = dx**2
_a : Union[str, Any] = dy**2
_a : Union[str, Any] = dx * dy
_a : int = 0.0_4
_a : List[str] = self.window_size // 2
for y in range(UpperCAmelCase__ , h - offset ):
for x in range(UpperCAmelCase__ , w - offset ):
_a : str = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_a : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_a : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_a : Any = (wxx * wyy) - (wxy**2)
_a : Tuple = wxx + wyy
_a : Any = det - k * (trace**2)
                # threshold on the corner response r; this value can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
_snake_case = HarrisCorner(0.04, 3)
_snake_case , _snake_case = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 294 | 1 |
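The nested loops above evaluate the Harris response r = det(M) - k * trace(M)^2 for each window. A vectorised sketch of the same formula on toy gradient sums, with k = 0.04 and the 0.5 threshold the row uses:

import numpy as np

wxx, wyy, wxy = (np.random.rand(5, 5) for _ in range(3))
k = 0.04
det = wxx * wyy - wxy**2
trace = wxx + wyy
response = det - k * trace**2
corners = np.argwhere(response > 0.5)  # (y, x) positions whose response clears the threshold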
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 365 |
import baseaa
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> bytes:
return baseaa.baaencode(string.encode('''utf-8''' ) )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : bytes ) -> str:
return baseaa.baadecode(__UpperCamelCase ).decode('''utf-8''' )
if __name__ == "__main__":
_lowerCamelCase = 'Hello World!'
_lowerCamelCase = baseaa_encode(test)
print(encoded)
_lowerCamelCase = baseaa_decode(encoded)
print(decoded)
| 177 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , ):
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , ):
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
a__ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 |
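A quick numeric check of the formulas these helpers implement, with figures chosen purely for illustration:

principal, daily_rate, days = 10_000, 0.0005, 30
simple = principal * daily_rate * days  # simple interest: 150.0

periodic_rate, periods = 0.01, 12  # 1% per compounding period, 12 periods
compound = principal * ((1 + periodic_rate) ** periods - 1)
print(round(simple, 2), round(compound, 2))  # 150.0 1268.25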
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A__(unittest.TestCase ):
"""simple docstring"""
_A : List[str] = StableDiffusionLDMaDPipeline
_A : int = TEXT_TO_IMAGE_PARAMS
_A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_A : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a_ : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
a_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ : Tuple = CLIPTextModel(_lowercase )
a_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Any:
if str(_lowercase ).startswith("""mps""" ):
a_ : Optional[Any] = torch.manual_seed(_lowercase )
else:
a_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Any = self.get_dummy_components()
a_ : List[str] = StableDiffusionLDMaDPipeline(**_lowercase )
a_ : Union[str, Any] = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : int = self.get_dummy_inputs(_lowercase )
a_ : List[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : Tuple = output.rgb, output.depth
a_ : Union[str, Any] = rgb[0, -3:, -3:, -1]
a_ : Any = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a_ : Optional[Any] = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
a_ : int = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = self.get_dummy_components()
a_ : Optional[int] = StableDiffusionLDMaDPipeline(**_lowercase )
a_ : Optional[Any] = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Dict = self.get_dummy_inputs(_lowercase )
a_ : List[str] = 3 * [inputs["""prompt"""]]
# forward
a_ : Optional[int] = ldmad_pipe(**_lowercase )
a_ , a_ : Any = output.rgb, output.depth
a_ : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
a_ : Union[str, Any] = depth_slice_a[0, -3:, -1]
a_ : Dict = self.get_dummy_inputs(_lowercase )
a_ : List[str] = 3 * [inputs.pop("""prompt""" )]
a_ : List[Any] = ldmad_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
a_ : int = text_inputs["""input_ids"""].to(_lowercase )
a_ : Any = ldmad_pipe.text_encoder(_lowercase )[0]
a_ : Dict = prompt_embeds
# forward
a_ : int = ldmad_pipe(**_lowercase )
a_ , a_ : Optional[int] = output.rgb, output.depth
a_ : List[str] = rgb_slice_a[0, -3:, -3:, -1]
a_ : Tuple = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def UpperCamelCase__ ( self ) -> Dict:
a_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : Any = PNDMScheduler(skip_prk_steps=_lowercase )
a_ : List[str] = StableDiffusionLDMaDPipeline(**_lowercase )
a_ : str = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[Any] = self.get_dummy_inputs(_lowercase )
a_ : int = """french fries"""
a_ : Any = ldmad_pipe(**_lowercase , negative_prompt=_lowercase )
a_ , a_ : Optional[Any] = output.rgb, output.depth
a_ : Tuple = rgb[0, -3:, -3:, -1]
a_ : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a_ : Optional[int] = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
a_ : Union[str, Any] = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> List[str]:
a_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Dict = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
a_ : Tuple = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
a_ : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Any:
a_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
a_ : str = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Dict = self.get_inputs(_lowercase )
a_ : Optional[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : int = output.rgb, output.depth
a_ : str = rgb[0, -3:, -3:, -1].flatten()
a_ : Tuple = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
a_ : Optional[int] = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
a_ : Optional[int] = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> str:
a_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Tuple = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
a_ : Any = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
a_ : Dict = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[str] = self.get_inputs(_lowercase )
a_ : Union[str, Any] = ldmad_pipe(**_lowercase )
a_ , a_ : str = output.rgb, output.depth
a_ : List[str] = 0.4_9_5_5_8_6
a_ : int = 0.3_3_7_9_5_5_1_5
a_ : int = 1_1_2.4_8_5_1_8
a_ : Optional[int] = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[str] = self.get_inputs(_lowercase )
a_ : List[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : List[Any] = output.rgb, output.depth
a_ : int = 0.4_1_9_4_1_2_7
a_ : List[str] = 0.3_5_3_7_5_5_8_6
a_ : Optional[int] = 0.5_6_3_8_5_0_2
a_ : str = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 248 | 0 |
import os
import numpy
import onnx
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : List[Any] = a.name
__lowerCamelCase : Union[str, Any] = b.name
__lowerCamelCase : str = ''
__lowerCamelCase : str = ''
__lowerCamelCase : Any = a == b
__lowerCamelCase : Tuple = name_a
__lowerCamelCase : Tuple = name_b
return res
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase__ , lowerCamelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase__ , lowerCamelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase__ , lowerCamelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase__ , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Union[str, Any] = list(model.graph.initializer )
__lowerCamelCase : Union[str, Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__lowerCamelCase : Optional[int] = inits[i].name
__lowerCamelCase : List[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase__ , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Any = os.path.dirname(lowerCamelCase__ )
__lowerCamelCase : List[Any] = os.path.basename(lowerCamelCase__ )
__lowerCamelCase : List[str] = onnx.load(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Any = list(model.graph.initializer )
__lowerCamelCase : int = set()
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : Dict = []
__lowerCamelCase : Optional[Any] = 0
for i in range(len(lowerCamelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase__ )
dup_set.add(lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = inits[j].data_type
__lowerCamelCase : List[Any] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase__ )
total_reduced_size += mem_size
__lowerCamelCase : Tuple = inits[i].name
__lowerCamelCase : Optional[int] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase__ )
else:
__lowerCamelCase : Tuple = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
__lowerCamelCase : Tuple = sorted(lowerCamelCase__ )
_remove_dup_initializers_from_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = 'optimized_' + model_file_name
__lowerCamelCase : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
onnx.save(lowerCamelCase__ , lowerCamelCase__ )
return new_model
| 113 |
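The dedup pass above treats two initializers as equal only after blanking their `name` fields, comparing, and restoring the names, since the name is the one field that is allowed to differ. A minimal sketch of that trick on a toy tensor type (the class is mine, not ONNX's TensorProto):

from dataclasses import dataclass

@dataclass
class Tensor:
    name: str
    data: bytes

def is_duplicate(a: Tensor, b: Tensor) -> bool:
    name_a, name_b = a.name, b.name
    a.name = b.name = ""  # names must not take part in the comparison
    equal = a == b
    a.name, b.name = name_a, name_b
    return equal

print(is_duplicate(Tensor("w1", b"\x00\x01"), Tensor("w2", b"\x00\x01")))  # True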
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a ={
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 113 | 1 |
"""simple docstring"""
from collections import defaultdict
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = 1
__A = True
for v in tree[start]:
if v not in visited:
ret += dfs(__UpperCamelCase )
if ret % 2 == 0:
cuts.append(__UpperCamelCase )
return ret
def lowerCAmelCase ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
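    # For the sample tree above the answer printed is 2: cutting the edges
    # (3, 1) and (6, 1) leaves components of sizes 2, 4 and 4 -- all even.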
| 266 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __A  # the expected-encoding dict built on the line above, kept verbatim
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415", )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Compare our ids with those produced by the original TF tokenizer implementation."""
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1], )
| 266 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 256 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
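# Note: ``accuracy`` returns the raw number of correct predictions, not a rate;
# e.g. accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) == 2.
# The caller divides by the number of examples at the end of evaluation.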
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        dataset = csv.reader(f)
        output = []
        next(dataset)  # skip the first line
        for line in tqdm(dataset):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, 2, length) comprising for each batch and alternative:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
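# ``mc_token_ids`` points at the [clf_token] position of each alternative: the
# double-heads model reads its multiple-choice logit from the hidden state at
# exactly that index, while ``lm_labels`` drives the auxiliary LM loss.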
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps", default=-1, type=int, help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ), )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main() | 256 | 1 |
"""simple docstring"""
def __lowercase ( snake_case_ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
__A : int = 0
__A : List[Any] = len(snake_case_ )
for i in range(n - 1 ):
for j in range(i + 1 ,snake_case_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
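# Example: [3, 1, 2] has exactly two inversions, (3, 1) and (3, 2).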
def count_inversions_recursive(arr):
    """Count inversions while merge-sorting -- O(n log n).

    Returns a tuple (sorted copy of arr, number of inversions)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge the two sorted lists p and q, counting pairs with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
            num_inversion += len(p) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
    if i < len(p):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
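# Example: _count_cross_inversions([4, 5], [1, 2, 3]) returns ([1, 2, 3, 4, 5], 6):
# each of 4 and 5 originally preceded each of 1, 2 and 3.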
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 179 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
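# Example: split_text("a b c d e", n=2) -> ["a b", "c d", "e"]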
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
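# Each pooler_output row is a 768-dimensional DPR vector, which is why the HNSW
# index dimension ``d`` defaults to 768 in IndexHnswArguments below.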
def main( rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments", ) -> None:
'''simple docstring'''
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, )
    question: Optional[str] = field(
        default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base", metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        }, )
    batch_size: int = field(
        default=16, metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        }, )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
    m: int = field(
        default=128, metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 179 | 1 |
'''Matrix-chain multiplication: find, by dynamic programming, the parenthesization
that minimizes the number of scalar multiplications.'''
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
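    # For these dimensions the minimum is 15125 scalar multiplications, printed as
    # the parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).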
if __name__ == "__main__":
main() | 135 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input( self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False, ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # NOTE: the obfuscated source only shows ``... = 32`` here; ``prev_output_channel``
            # is the assumed dict key, matching the signature of diffusers' up blocks.
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward() | 135 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 284 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )


def trim_batch(
    input_ids, pad_token_id, attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
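# Example: with pad_token_id = 0 and input_ids [[5, 7, 0], [6, 0, 0]], the all-pad
# third column is dropped, giving [[5, 7], [6, 0]].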
class Seq2SeqDataset(Dataset):
    def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer ( s : str ) -> str:
    def remove_articles(text : str ):
        return re.sub(r'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text : str ):
        return " ".join(text.split() )
    def remove_punc(text : str ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text : str ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score ( prediction : str , ground_truth : str ) -> float:
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score ( prediction : str , ground_truth : str ) -> bool:
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match ( output_lns : List[str] , reference_lns : List[str] ) -> Dict:
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model ( model_prefix : str ) -> bool:
    return model_prefix.startswith('rag' )
def set_extra_model_params ( extra_params : Optional[Any] , hparams : Any , config : Any ) -> Tuple:
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config | 225 | 0 |
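A minimal smoke test for the QA-metric helpers above (my sketch, not part of the original utilities; names follow the fixed `normalize_answer`, `f1_score`, `exact_match_score`, `calculate_exact_match`):
# Normalization strips articles, punctuation, extra whitespace and case,
# so both pairs below match exactly after normalization.
predictions = ["The cat sat on a mat!", "Paris"]
references = ["cat sat on the mat", "paris."]
for pred, ref in zip(predictions, references):
    print(normalize_answer(pred), "|", f1_score(pred, ref), "|", exact_match_score(pred, ref))
print(calculate_exact_match(predictions, references))  # {'em': 1.0} for these pairs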
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method ( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[float] , iterations : int , ) -> list[float]:
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg )
    if cols2 != 1:
        msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'''matrix but received {len(init_val )} and {rows1}'''
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""" )
    table : NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[float64] ) -> bool:
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | '''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo ( ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def hide_cursor ( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()
def show_cursor ( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()
@contextmanager
def hide ( ) -> Any:
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 21 | 1 |
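A quick usage sketch (my addition) for the Jacobi solver above; the input matrix satisfies the strict diagonal dominance that `strictly_diagonally_dominant` enforces:
import numpy as np
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]
# Each iteration solves row i for x_i using the previous iterate's values.
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))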
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator , batch_size = 16 ):
    """simple docstring"""
    tokenizer =AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets =load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets =datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets =tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length =128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of =16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of =8
        else:
            pad_to_multiple_of =None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader =DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader =DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function ( config , args ) -> List[Any]:
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] =2
    # New Code #
    gradient_accumulation_steps =int(args.gradient_accumulation_steps )
    local_sgd_steps =int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator =Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr =config['''lr''']
    num_epochs =int(config['''num_epochs'''] )
    seed =int(config['''seed'''] )
    batch_size =int(config['''batch_size'''] )
    metric =evaluate.load('''glue''' , '''mrpc''' )
    set_seed(seed )
    train_dataloader , eval_dataloader =get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model =model.to(accelerator.device )
    # Instantiate optimizer
    optimizer =AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler =get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler =accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output =model(**batch )
                    loss =output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs =model(**batch )
            predictions =outputs.logits.argmax(dim=-1 )
            predictions , references =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric =metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main ( ) -> Optional[int]:
    """simple docstring"""
    parser =argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
    parser.add_argument(
        '''--local_sgd_steps''' , type=int , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args =parser.parse_args()
    config ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 5 |
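The example above synchronizes replicas only every `local_sgd_steps` optimizer steps. A rough standalone sketch (my simplification, not accelerate's actual implementation, which lives in `accelerate.local_sgd.LocalSGD`) of that periodic parameter averaging:
import torch.distributed as dist

def local_sgd_average(model, step, k=8):
    # Every k-th step, average parameters across all workers instead of
    # all-reducing gradients on every step.
    if step % k == 0 and dist.is_initialized():
        world_size = dist.get_world_size()
        for p in model.parameters():
            dist.all_reduce(p.data, op=dist.ReduceOp.SUM)
            p.data /= world_size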
from typing import Any
def viterbi ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists ( observations_space , states_space ) -> None:
    """simple docstring"""
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list ( _object , var_name ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg =F"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg =F"{var_name} must be a list of strings"
                raise ValueError(msg )
def _validate_dicts ( initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict ( _object , var_name ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict ( _object , var_name , value_type , nested = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg =F"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg =F"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text ='''nested dictionary ''' if nested else ''''''
        msg =F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 1 |
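A usage sketch (my addition) for the Viterbi implementation above, using the classic healthy/fever HMM; all probability values are floats, as the validators require:
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))
# Most likely hidden path: ['Healthy', 'Healthy', 'Fever']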
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 354 |
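The `_LazyModule` indirection above keeps `import transformers.models.convbert` cheap: the heavy torch/TF modeling files are imported only on first attribute access. A small sketch of the effect (assuming transformers is installed):
from transformers import ConvBertConfig  # resolved through the lazy module

config = ConvBertConfig()  # config import is lightweight; modeling code is untouched
print(config.hidden_size)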
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset ( ) -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-10_00 , 10_00 ) for i in range(10 )]
    r = randint(-50_00 , 50_00 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1 ( arr : list[int] , target : int ) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2 ( arr : list[int] , target : int ) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times ( ) -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=1_00_00 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=1_00_00 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
| 67 | 0 |
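A small comparison sketch (my addition) for the two triplet-sum implementations above; when several triplets hit the target, the two functions may legitimately return different ones:
data = ([-2, 7, 3, 1, 9, 5], 10)
print(triplet_sum1(*data))  # brute force over permutations, O(n^3)
print(triplet_sum2(*data))  # sort + two pointers, O(n^2)
# e.g. (-2, 5, 7) and (-2, 3, 9) are both valid answers here.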
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ) -> Tuple:
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ) -> List[Any]:
        with tf.name_scope(self.name or "WarmUp" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config ( self ) -> Optional[Any]:
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer ( init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.999 , adam_epsilon : float = 1e-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ):
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay ( Adam ):
    def __init__( self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ) -> Dict:
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config ( cls , config ) -> List[Any]:
        custom_objects = {"""WarmUp""": WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local ( self , var_device , var_dtype , apply_state ) -> Any:
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate , name="adam_weight_decay_rate" )
    def _decay_weights_op ( self , var , learning_rate , apply_state ) -> Tuple:
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients ( self , grads_and_vars , name=None , **kwargs ) -> Tuple:
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr ( self , var_device , var_dtype , apply_state ) -> Dict:
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense ( self , grad , var , apply_state=None ) -> List[str]:
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse ( self , grad , var , indices , apply_state=None ) -> Tuple:
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config ( self ) -> int:
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate} )
        return config
    def _do_use_weight_decay ( self , param_name ) -> bool:
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator :
    def __init__( self ) -> Tuple:
        self._gradients = []
        self._accum_steps = None
    @property
    def step ( self ) -> List[str]:
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients ( self ) -> int:
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ) -> List[Any]:
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(gradients )}""" )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset ( self ) -> Dict:
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
| 231 |
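A minimal wiring sketch (my addition) for the schedule/optimizer factory above, using the fixed `create_optimizer` name:
optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=500,
    weight_decay_rate=0.01,
)
# Warmup is linear with the default power=1.0: halfway through warmup,
# the schedule yields 0.5 * init_lr.
print(float(lr_schedule(250)))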
from __future__ import annotations
import math
def is_prime ( number : int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums ( n : int) -> list[int]:
    """simple docstring"""
    if not isinstance(n , int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution ( ) -> int:
    """simple docstring"""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'{solution() = }')
| 170 | 0 |
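A worked check (my addition) of the conjecture the code above refutes: every odd composite was claimed to be a prime plus twice a square, e.g. 33 = 31 + 2*1^2 and 15 = 7 + 2*2^2. The search finds the smallest counterexample:
print(solution())  # 5777, the first odd composite with no such decomposition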
import argparse
import copy
def generate_neighbours (path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution (path , dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood (solution , dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search (first_solution , distance_of_first_solution , dict_of_neighbours , iters , size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main (args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours)
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''')
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 350 |
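The functions above expect an edge-list file: one line per edge in the form `node_a node_b distance`. A hypothetical driver (the file name is a placeholder of mine):
neighbours = generate_neighbours("distances.txt")
first_solution, first_distance = generate_first_solution("distances.txt", neighbours)
best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 50, 10)
print(best_solution, best_cost)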
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_ (state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None)
def make_linear_from_emb (emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk (checkpoint_path):
    m2m_100 = torch.load(checkpoint_path , map_location='cpu')
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict , strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 327 | 0 |
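A hypothetical end-to-end run of the converter above (paths are placeholders of mine):
model = convert_fairseq_m2m100_checkpoint_from_disk("checkpoints/m2m100/model.pt")
model.save_pretrained("./m2m100-converted")
# or from the command line:
# python convert_m2m100_checkpoint.py checkpoints/m2m100/model.pt ./m2m100-converted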
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase = logging.get_logger(__name__)
class _a ( _lowercase):
_a : List[str] = ['input_features', 'is_longer']
def __init__( self : Dict , _SCREAMING_SNAKE_CASE : Tuple=64 , _SCREAMING_SNAKE_CASE : List[str]=4_8000 , _SCREAMING_SNAKE_CASE : List[str]=480 , _SCREAMING_SNAKE_CASE : Union[str, Any]=10 , _SCREAMING_SNAKE_CASE : Optional[int]=1024 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : Any = 0 , _SCREAMING_SNAKE_CASE : List[Any] = 1_4000 , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[int] = "fusion" , _SCREAMING_SNAKE_CASE : Tuple = "repeatpad" , **_SCREAMING_SNAKE_CASE : Dict , )-> Any:
super().__init__(
feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Tuple = top_db
lowerCAmelCase__ : Union[str, Any] = truncation
lowerCAmelCase__ : Dict = padding
lowerCAmelCase__ : List[Any] = fft_window_size
lowerCAmelCase__ : List[Any] = (fft_window_size >> 1) + 1
lowerCAmelCase__ : Tuple = hop_length
lowerCAmelCase__ : List[Any] = max_length_s
lowerCAmelCase__ : str = max_length_s * sampling_rate
lowerCAmelCase__ : List[Any] = sampling_rate
lowerCAmelCase__ : Any = frequency_min
lowerCAmelCase__ : Dict = frequency_max
lowerCAmelCase__ : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=SCREAMING_SNAKE_CASE_ , max_frequency=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , norm=SCREAMING_SNAKE_CASE_ , mel_scale='''htk''' , )
lowerCAmelCase__ : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=SCREAMING_SNAKE_CASE_ , max_frequency=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , norm='''slaney''' , mel_scale='''slaney''' , )
def UpperCAmelCase__( self : Tuple )-> Dict[str, Any]:
lowerCAmelCase__ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple = None )-> np.ndarray:
lowerCAmelCase__ : Tuple = spectrogram(
SCREAMING_SNAKE_CASE_ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=SCREAMING_SNAKE_CASE_ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] )-> Dict:
lowerCAmelCase__ : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase__ : Tuple = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase__ : Optional[Any] = [0]
# randomly choose index for each part
lowerCAmelCase__ : Optional[int] = np.random.choice(ranges[0] )
lowerCAmelCase__ : Union[str, Any] = np.random.choice(ranges[1] )
lowerCAmelCase__ : List[Any] = np.random.choice(ranges[2] )
lowerCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase__ : List[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase__ : Tuple = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase__ : int = torch.tensor(mel[None, None, :] )
lowerCAmelCase__ : Tuple = torch.nn.functional.interpolate(
SCREAMING_SNAKE_CASE_ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase__ : List[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] )-> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase__ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ ) - max_length
lowerCAmelCase__ : Dict = np.random.randint(0 , overflow + 1 )
lowerCAmelCase__ : List[str] = waveform[idx : idx + max_length]
lowerCAmelCase__ : str = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase__ : Any = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters )
lowerCAmelCase__ : Any = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase__ : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase__ : Dict = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase__ : Optional[int] = False
else:
lowerCAmelCase__ : Optional[Any] = self._random_mel_fusion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
lowerCAmelCase__ : List[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase__ : int = int(max_length / len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : Optional[Any] = np.stack(np.tile(SCREAMING_SNAKE_CASE_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase__ : Optional[int] = int(max_length / len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : str = np.stack(np.tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : Any = np.pad(SCREAMING_SNAKE_CASE_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase__ : str = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters )
lowerCAmelCase__ : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase__ : Union[str, Any] = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[Any] = None , _SCREAMING_SNAKE_CASE : Any = None , _SCREAMING_SNAKE_CASE : Optional[Any] = None , _SCREAMING_SNAKE_CASE : List[str] = None , **_SCREAMING_SNAKE_CASE : str , )-> BatchFeature:
lowerCAmelCase__ : Dict = truncation if truncation is not None else self.truncation
lowerCAmelCase__ : Dict = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase__ : str = [
self._get_input_mel(SCREAMING_SNAKE_CASE_ , max_length if max_length else self.nb_max_samples , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for waveform in raw_speech
]
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[Any] = []
for mel, longer in padded_inputs:
input_mel.append(SCREAMING_SNAKE_CASE_ )
is_longer.append(SCREAMING_SNAKE_CASE_ )
if truncation == "fusion" and sum(SCREAMING_SNAKE_CASE_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase__ : Any = np.random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : Tuple = True
if isinstance(input_mel[0] , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : int = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase__ : Optional[Any] = [[longer] for longer in is_longer]
lowerCAmelCase__ : Optional[Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowerCAmelCase__ : Optional[int] = BatchFeature(SCREAMING_SNAKE_CASE_ )
if return_tensors is not None:
lowerCAmelCase__ : Tuple = input_features.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return input_features
| 131 |
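A usage sketch (my addition) via the real transformers class that the extractor above mirrors; the 48 kHz mono waveform and 12 s length are assumptions for illustration:
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
waveform = np.zeros(48_000 * 12, dtype=np.float32)  # 12 s of silence at 48 kHz
features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape, features["is_longer"])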
def print_max_activities ( start : list[int] , finish : list[int] ) -> None:
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 259 | 0 |
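A worked trace of the greedy rule on the module-level example above (start=[1, 3, 0, 5, 8, 5], finish=[2, 4, 6, 7, 9, 9]):
# activity 0 is always taken (finishes at 2)
# j=1: start 3 >= finish 2 -> take, now tracking finish 4
# j=2: start 0 <  finish 4 -> skip
# j=3: start 5 >= finish 4 -> take, now tracking finish 7
# j=4: start 8 >= finish 7 -> take
# j=5: start 5 <  finish 9 -> skip
# printed selection: 0,1,3,4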
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo ( ctypes.Structure ):
  # _fields_ is a specific attr expected by ctypes
  _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor ( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor ( ) -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def hide ( ) -> None:
    try:
        hide_cursor()
        yield
    finally:
        show_cursor() | 368 |
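A usage sketch (my addition) for the cursor helpers above; the context manager is named `hide` after the fix:
import time
with hide():
    time.sleep(1)  # terminal cursor is hidden while this block runs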
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
    def get_dummy_components ( self : List[str] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs ( self : int , device : str , seed : int=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
        lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
 | 234 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class lowerCAmelCase__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : int , lowerCamelCase__ : float , lowerCamelCase__ : Callable , lowerCamelCase__ : int , lowerCamelCase__ : float = 1.0 , lowerCamelCase__ : str = None , ) ->int:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : int = initial_learning_rate
_UpperCAmelCase : Any = warmup_steps
_UpperCAmelCase : Dict = power
_UpperCAmelCase : int = decay_schedule_fn
_UpperCAmelCase : List[str] = name
def __call__( self : Dict , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
with tf.name_scope(self.name or "WarmUp" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
            _UpperCAmelCase : Dict = tf.cast(lowerCamelCase__ , tf.float32 )
            _UpperCAmelCase : Union[str, Any] = tf.cast(self.warmup_steps , tf.float32 )
_UpperCAmelCase : int = global_step_float / warmup_steps_float
_UpperCAmelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowerCamelCase__ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCamelCase__ , )
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer (init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.9_9_9 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Dict , lowerCamelCase__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , lowerCamelCase__ : float = 0.9 , lowerCamelCase__ : float = 0.9_9_9 , lowerCamelCase__ : float = 1E-7 , lowerCamelCase__ : bool = False , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : str = "AdamWeightDecay" , **lowerCamelCase__ : List[Any] , ) ->Tuple:
'''simple docstring'''
super().__init__(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = weight_decay_rate
_UpperCAmelCase : int = include_in_weight_decay
_UpperCAmelCase : int = exclude_from_weight_decay
@classmethod
def lowerCAmelCase__ ( cls : Tuple , lowerCamelCase__ : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = {"WarmUp": WarmUp}
return super(lowerCamelCase__ , cls ).from_config(lowerCamelCase__ , custom_objects=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple ) ->int:
'''simple docstring'''
super(lowerCamelCase__ , self )._prepare_local(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any]=None , **lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Any = list(zip(*lowerCamelCase__ ) )
return super(lowerCamelCase__ , self ).apply_gradients(zip(lowerCamelCase__ , lowerCamelCase__ ) , name=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_UpperCAmelCase : Tuple = apply_state or {}
_UpperCAmelCase : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_UpperCAmelCase : List[str] = self._fallback_apply_state(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None ) ->Dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ )
_UpperCAmelCase : Any = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__ , self )._resource_apply_dense(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any=None ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ )
_UpperCAmelCase : int = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__ , self )._resource_apply_sparse(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : str ) ->Optional[Any]:
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None:
return False
return True
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = []
_UpperCAmelCase : int = None
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
if self._accum_steps is None:
_UpperCAmelCase : str = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=lowerCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->str:
'''simple docstring'''
if not self._gradients:
_UpperCAmelCase : Tuple = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase__ ) , trainable=lowerCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase__ ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}""" )
for accum_gradient, gradient in zip(self._gradients , lowerCamelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase__ )
self._accum_steps.assign_add(1 )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
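# Minimal usage sketch for create_optimizer above, assuming the obfuscated class
# signatures in this file are restored to their upstream (transformers) forms so
# that WarmUp and AdamWeightDecay behave as their bodies intend:
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5 , num_train_steps=1_000 , num_warmup_steps=100 , weight_decay_rate=0.01 , )
# With the default power=1.0 the warmup is linear (roughly init_lr / 2 halfway
# through the 100 warmup steps), after which PolynomialDecay takes over down to
# init_lr * min_lr_ratio at step num_train_steps.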
| 234 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ = '▁'
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = ["input_ids", "token_type_ids"]
lowerCAmelCase : Optional[Any] = FNetTokenizer
def __init__( self : Dict , lowerCamelCase__ : int=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Any=False , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : List[str]="[SEP]" , lowerCamelCase__ : Union[str, Any]="<pad>" , lowerCamelCase__ : Optional[Any]="[CLS]" , lowerCamelCase__ : Any="[MASK]" , **lowerCamelCase__ : Any , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = (
AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ , normalized=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
else mask_token
)
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = do_lower_case
_UpperCAmelCase : Tuple = remove_space
_UpperCAmelCase : List[Any] = keep_accents
_UpperCAmelCase : Tuple = vocab_file
_UpperCAmelCase : str = False if not self.vocab_file else True
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
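# Layout produced by the two sequence helpers above, schematically:
#   single sequence: [CLS] A [SEP]         -> token_type_ids = all 0
#   sequence pair:   [CLS] A [SEP] B [SEP] -> 0 through the first [SEP],
#                                              1 for B and the trailing [SEP]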
| 234 | 1 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def A (*__A : str ) -> Optional[int]:
"""simple docstring"""
if not isinstance(a_ , a_ ):
UpperCAmelCase_ = list(a_ )
for i in range(len(a_ ) ):
UpperCAmelCase_ = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def A (__A : Exception ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(a_ , a_ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def A (__A : callable = None , __A : int = 128 ) -> List[str]:
"""simple docstring"""
if function is None:
return functools.partial(a_ , starting_batch_size=a_ )
UpperCAmelCase_ = starting_batch_size
def decorator(*__A : Optional[int] , **__A : List[str] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
UpperCAmelCase_ = list(inspect.signature(a_ ).parameters.keys() )
# Guard against user error
if len(a_ ) < (len(a_ ) + 1):
UpperCAmelCase_ = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(a_ , *a_ , **a_ )
except Exception as e:
if should_reduce_batch_size(a_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
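# Usage sketch via the upstream ``accelerate`` name for the decorator defined
# above (the three module-level defs in this file all shadow each other as ``A``).
# The wrapped function takes the batch size as its first argument and is retried
# with batch_size // 2 after each recognized out-of-memory failure.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def _inner_training_loop(batch_size):
    print(F"""attempting batch_size={batch_size}""")
    # ... build dataloaders and run one training pass at this batch size ...

_inner_training_loop()  # called with no arguments; the decorator injects batch_size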
| 366 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fp16=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fp16=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
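# Standalone sketch of the pattern these tests exercise, with the boolean flags
# spelled out (the test bodies pass them through obfuscated names); commented so
# the module does not launch a benchmark on import:
# args = PyTorchBenchmarkArguments(
#     models=['sshleifer/tiny-gpt2'] , training=False , inference=True ,
#     sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
# results = PyTorchBenchmark(args).run()
# results.time_inference_result and results.memory_inference_result hold the
# nested dicts that check_results_dict_not_empty walks.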
| 7 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowercase_ ( _lowerCamelCase : bool = True , *_lowerCamelCase : int , **_lowerCamelCase : Optional[Any]):
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
lowercase__ : Dict = False
if main_process_only:
lowercase__ : int = PartialState().local_process_index == 0
return _tqdm(*_lowerCamelCase , **_lowerCamelCase , disable=_lowerCamelCase)
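# Usage sketch: the first positional argument is ``main_process_only`` (the
# wrapper above is defined under an obfuscated name; upstream it is
# ``accelerate.utils.tqdm``). On a multi-process launch only local rank 0
# renders the bar, the other ranks receive disable=True:
# for step in tqdm(True , range(1_000) , desc='steps'):
#     ...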
| 87 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class snake_case_ ( __A ):
@add_start_docstrings(lowercase_ )
def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class snake_case_ ( __A ):
def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]:
lowercase__ : str = max_length
lowercase__ : Optional[int] = max_position_embeddings
@add_start_docstrings(lowercase_ )
def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
lowercase__ : str = input_ids.shape[-1]
lowercase__ : Any = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"exceptions, performance degradation, or nothing at all." )
return is_done
class snake_case_ ( __A ):
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"with `max_length = start_length + max_new_tokens` instead." , lowercase_ , )
lowercase__ : Optional[int] = start_length
lowercase__ : str = max_new_tokens
lowercase__ : Tuple = start_length + max_new_tokens
@add_start_docstrings(lowercase_ )
def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool:
return input_ids.shape[-1] >= self.max_length
class snake_case_ ( __A ):
def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict:
lowercase__ : List[str] = max_time
lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowercase_ )
def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class snake_case_ ( __A ):
@add_start_docstrings(lowercase_ )
def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
return any(criteria(lowercase_ , lowercase_ ) for criteria in self )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(lowercase_ , lowercase_ ):
return stopping_criterium.max_length
elif isinstance(lowercase_ , lowercase_ ):
return stopping_criterium.max_length
return None
def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int):
lowercase__ : Optional[int] = stopping_criteria.max_length
lowercase__ : str = deepcopy(_lowerCamelCase)
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase)
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase))
return new_stopping_criteria
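# Usage sketch with the upstream ``transformers`` names for the classes defined
# above under obfuscated identifiers; generation stops as soon as any single
# criterion returns True, matching the any() reduction in the list's __call__.
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=5.0)]
)
# model.generate(input_ids , stopping_criteria=criteria)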
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : List[Any] = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""OwlViTFeatureExtractor"""]
__lowerCamelCase : int = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any ):
snake_case__ : List[str] = b.T
snake_case__ : Union[str, Any] = np.sum(np.square(snake_case_ ) , axis=1 )
snake_case__ : Dict = np.sum(np.square(snake_case_ ) , axis=0 )
snake_case__ : Dict = np.matmul(snake_case_ , snake_case_ )
snake_case__ : Any = aa[:, None] - 2 * ab + ba[None, :]
return d
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Tuple ):
snake_case__ : Tuple = x.reshape(-1 , 3 )
snake_case__ : int = squared_euclidean_distance(snake_case_ , snake_case_ )
return np.argmin(snake_case_ , axis=1 )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["pixel_values"]
def __init__( self : str , __A : Optional[Union[List[List[int]], np.ndarray]] = None , __A : bool = True , __A : Dict[str, int] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : bool = True , **__A : Union[str, Any] , ):
super().__init__(**__A )
snake_case__ : Optional[int] = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
snake_case__ : List[Any] = get_size_dict(__A )
snake_case__ : Any = np.array(__A ) if clusters is not None else None
snake_case__ : Optional[Any] = do_resize
snake_case__ : Any = size
snake_case__ : List[Any] = resample
snake_case__ : List[Any] = do_normalize
snake_case__ : Dict = do_color_quantize
def _lowercase ( self : List[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : Optional[Union[str, ChannelDimension]] = None , **__A : int , ):
snake_case__ : List[Any] = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__A , size=(size["height"], size["width"]) , resample=__A , data_format=__A , **__A )
def _lowercase ( self : List[Any] , __A : np.ndarray , __A : Optional[Union[str, ChannelDimension]] = None , ):
snake_case__ : List[str] = rescale(image=__A , scale=1 / 1_2_7.5 , data_format=__A )
snake_case__ : List[Any] = image - 1
return image
def _lowercase ( self : Dict , __A : ImageInput , __A : bool = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Optional[bool] = None , __A : Optional[Union[List[List[int]], np.ndarray]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__A : Optional[int] , ):
snake_case__ : Any = do_resize if do_resize is not None else self.do_resize
snake_case__ : Union[str, Any] = size if size is not None else self.size
snake_case__ : Union[str, Any] = get_size_dict(__A )
snake_case__ : Optional[Any] = resample if resample is not None else self.resample
snake_case__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ : Union[str, Any] = clusters if clusters is not None else self.clusters
snake_case__ : Union[str, Any] = np.array(__A )
snake_case__ : Any = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ : Optional[Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
snake_case__ : List[str] = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_normalize:
snake_case__ : Union[str, Any] = [self.normalize(image=__A ) for image in images]
if do_color_quantize:
snake_case__ : int = [to_channel_dimension_format(__A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ : int = np.array(__A )
snake_case__ : Dict = color_quantize(__A , __A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ : str = images.shape[0]
snake_case__ : str = images.reshape(__A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ : Union[str, Any] = list(__A )
else:
snake_case__ : Any = [to_channel_dimension_format(__A , __A ) for image in images]
snake_case__ : Optional[int] = {"input_ids": images}
return BatchFeature(data=__A , tensor_type=__A )
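# Self-contained check of the distance / quantization helpers above (both are
# defined under obfuscated names): every flattened RGB pixel is assigned the
# index of its nearest cluster centre.
_clusters = np.array([[0, 0, 0], [255, 255, 255]])  # black and white centres
_image = np.array([[[10, 10, 10], [250, 240, 245]]])  # one row of two RGB pixels
_flat = _image.reshape(-1, 3)
# same expansion used above: ||a||^2 - 2 a.b + ||b||^2, then argmin per pixel
_d = (np.sum(_flat**2, axis=1)[:, None]
      - 2 * _flat @ _clusters.T
      + np.sum(_clusters**2, axis=1)[None, :])
print(_d.argmin(axis=1))  # [0 1]: the dark pixel maps to cluster 0, the light one to 1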
| 286 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
snake_case__ : Optional[datasets.Features] = None
snake_case__ : str = "utf-8"
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
snake_case__ : bool = True # deprecated
snake_case__ : Optional[int] = None # deprecated
snake_case__ : int = 1_0 << 2_0 # 10MB
snake_case__ : Optional[bool] = None
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
snake_case__ : int = JsonConfig
def _A ( self : List[Any] ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
UpperCamelCase :Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def _A ( self : int , __lowerCamelCase : Tuple ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCamelCase :List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
UpperCamelCase :Optional[int] = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :Optional[int] = [files]
UpperCamelCase :Optional[Any] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
UpperCamelCase :Tuple = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :Dict = [files]
UpperCamelCase :List[Any] = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"""files""": files} ) )
return splits
def _A ( self : List[str] , __lowerCamelCase : pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCamelCase :List[str] = self.config.features.arrow_schema.field(__lowerCamelCase ).type
UpperCamelCase :Union[str, Any] = pa_table.append_column(__lowerCamelCase , pa.array([None] * len(__lowerCamelCase ) , type=__lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase :Any = table_cast(__lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _A ( self : Tuple , __lowerCamelCase : int ):
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase :Dict = json.load(__lowerCamelCase )
# We keep only the field we are interested in
UpperCamelCase :int = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__lowerCamelCase , (list, tuple) ):
UpperCamelCase :Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCamelCase :Any = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
else:
UpperCamelCase :Optional[int] = dataset
UpperCamelCase :Optional[Any] = pa.Table.from_pydict(__lowerCamelCase )
yield file_idx, self._cast_table(__lowerCamelCase )
# If the file has one json object per line
else:
with open(__lowerCamelCase , """rb""" ) as f:
UpperCamelCase :List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCamelCase :str = max(self.config.chunksize // 32 , 16 << 10 )
UpperCamelCase :Optional[int] = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
UpperCamelCase :int = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCamelCase :List[str] = batch.decode(self.config.encoding , errors=__lowerCamelCase ).encode("""utf-8""" )
try:
while True:
try:
UpperCamelCase :int = paj.read_json(
io.BytesIO(__lowerCamelCase ) , read_options=paj.ReadOptions(block_size=__lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__lowerCamelCase )
or block_size > len(__lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase :Optional[Any] = json.load(__lowerCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ): # list is the only sequence type supported in JSON
try:
UpperCamelCase :Tuple = set().union(*[row.keys() for row in dataset] )
UpperCamelCase :Dict = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
UpperCamelCase :Any = pa.Table.from_pydict(__lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__lowerCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
batch_idx += 1
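# Self-contained illustration of the core parsing call used above: pyarrow's
# line-delimited JSON reader over an in-memory chunk, with the block_size knob
# that the retry loop doubles whenever a record straddles a block boundary.
_chunk = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
_table = paj.read_json(io.BytesIO(_chunk), read_options=paj.ReadOptions(block_size=16 << 10))
# _table.to_pydict() -> {'a': [1, 2], 'b': ['x', 'y']}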
| 38 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 38 | 1 |
from math import pi, sqrt
def snake_case_ ( lowerCAmelCase_ )-> float:
'''simple docstring'''
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(lowerCAmelCase_ ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def snake_case_ ( )-> None:
'''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
A_ : Union[str, Any] = 1.0
while num:
A_ : Optional[Any] = float(input("""Gamma of: """))
print(f"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
| 365 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> List[str]:
_UpperCAmelCase : list[list[Edge]] = [[] for _ in range(a_ )]
_UpperCAmelCase : int = size
def __getitem__( self ,a_ ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def _snake_case ( self ) -> List[Any]:
return self._size
def _snake_case ( self ,a_ ,a_ ,a_ ) -> Tuple:
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(a_ ,a_ ) )
def _snake_case ( self ,a_ ,a_ ) -> int | None:
_UpperCAmelCase : Union[str, Any] = deque([start_vertex] )
_UpperCAmelCase : list[int | None] = [None] * self.size
_UpperCAmelCase : Union[str, Any] = 0
while queue:
_UpperCAmelCase : Union[str, Any] = queue.popleft()
_UpperCAmelCase : Union[str, Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_UpperCAmelCase : List[Any] = current_distance + edge.weight
_UpperCAmelCase : List[Any] = distances[edge.destination_vertex]
if (
isinstance(a_ ,a_ )
and new_distance >= dest_vertex_distance
):
continue
_UpperCAmelCase : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
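# A compact, self-contained restatement of the deque trick the graph class above
# uses (its method names are obfuscated): relaxing a 0-edge pushes the vertex to
# the front of the deque and a 1-edge to the back, so vertices are popped in
# nondecreasing distance order -- Dijkstra without a priority queue.
def zero_one_bfs(adj: list[list[tuple[int, int]]], start: int) -> list[float]:
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist

# zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0) -> [0, 0, 1]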
| 349 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
assert (
isinstance(__lowercase , __lowercase ) and number_of_steps > 0
), F"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
A , A: int = 1, 1
for _ in range(number_of_steps - 1 ):
A , A: Any = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
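# First values of the recurrence the loop implements (ways(n) = ways(n-1) + ways(n-2),
# i.e. steps of size 1 or 2): ways(1)=1, ways(2)=2, ways(3)=3, ways(4)=5, ways(5)=8.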
| 319 |
'''simple docstring'''
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
A: str = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowercase ) )
]
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowercase ) )
]
def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]:
if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
A: Union[str, Any] = len(__lowercase )
A: str = matrix_length // 2
A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )]
A: Optional[Any] = [
[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase )
]
A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )]
A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]:
return len(__lowercase ), len(matrix[0] )
def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
print('''\n'''.join(str(__lowercase ) for line in matrix ) )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if matrix_dimensions(__lowercase ) == (2, 2):
return default_matrix_multiplication(__lowercase , __lowercase )
A , A , A , A: Union[str, Any] = split_matrix(__lowercase )
A , A , A , A: List[Any] = split_matrix(__lowercase )
A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
A: Any = matrix_addition(__lowercase , __lowercase )
A: List[Any] = matrix_addition(__lowercase , __lowercase )
A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
# construct the new matrix from our 4 quadrants
A: Union[str, Any] = []
for i in range(len(__lowercase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__lowercase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
if matrix_dimensions(__lowercase )[1] != matrix_dimensions(__lowercase )[0]:
A: int = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(__lowercase )
A: str = matrix_dimensions(__lowercase )
A: str = matrix_dimensions(__lowercase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
A: Union[str, Any] = max(*__lowercase , *__lowercase )
A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) )
A: List[Any] = matrixa
A: Tuple = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , __lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
A: Any = actual_strassen(__lowercase , __lowercase )
# Removing the additional zeros
for i in range(0 , __lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowercase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
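# For reference, one textbook statement of the seven Strassen products (the
# recursion in actual_strassen computes an equivalent set under obfuscated
# bindings), for block matrices A = [[a11, a12], [a21, a22]], B likewise:
#   m1 = (a11 + a22)(b11 + b22)    m5 = (a11 + a12) b22
#   m2 = (a21 + a22) b11           m6 = (a21 - a11)(b11 + b12)
#   m3 = a11 (b12 - b22)           m7 = (a12 - a22)(b21 + b22)
#   m4 = a22 (b21 - b11)
#   c11 = m1 + m4 - m5 + m7        c12 = m3 + m5
#   c21 = m2 + m4                  c22 = m1 - m2 + m3 + m6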
| 319 | 1 |
"""simple docstring"""
__A : Optional[Any] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 57 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
# Load configuration defined in the metadata file
with open(lowercase__ ) as metadata_file:
A = json.load(lowercase__ )
A = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
A = torch.load(lowercase__ , map_location="cpu" )
# Load the entity vocab file
A = load_entity_vocab(lowercase__ )
A = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A = AddedToken("<ent>" , lstrip=lowercase__ , rstrip=lowercase__ )
A = AddedToken("<ent2>" , lstrip=lowercase__ , rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
A = LukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
A = state_dict["embeddings.word_embeddings.weight"]
A = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A = F"""encoder.layer.{layer_index}.attention.self."""
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A = state_dict["entity_embeddings.entity_embeddings.weight"]
A = entity_emb[entity_vocab["[MASK]"]]
A = LukeModel(config=lowercase__ ).eval()
A , A = model.load_state_dict(lowercase__ , strict=lowercase__ )
if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
A = LukeTokenizer.from_pretrained(lowercase__ , task="entity_classification" )
A = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A = (39, 42)
A = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors="pt" )
A = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
A = torch.Size((1, 42, 1_024) )
A = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
A = torch.Size((1, 42, 768) )
A = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A = torch.Size((1, 1, 1_024) )
A = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
A = torch.Size((1, 1, 768) )
A = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = {}
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(lowercase__ ):
A , A = line.rstrip().split("\t" )
A = index
return entity_vocab
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
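# Example invocation (script file name and all paths are placeholders):
# python convert_luke_checkpoint.py \
#     --checkpoint_path ./pytorch_model.bin \
#     --metadata_path ./metadata.json \
#     --entity_vocab_path ./entity_vocab.tsv \
#     --pytorch_dump_folder_path ./luke-base-converted \
#     --model_size base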
| 57 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _UpperCamelCase ( __A , __A , __A , __A ) -> str:
'''simple docstring'''
UpperCamelCase__ = s.rsplit(__A , __A )
return new.join(__A )
def _UpperCamelCase ( __A ) -> Any:
'''simple docstring'''
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def _UpperCamelCase ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = {}
UpperCamelCase__ = ["group_1", "group_2", "group_3", "group_4"]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
UpperCamelCase__ = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
if "res_path" in key:
UpperCamelCase__ = key.replace("res_path." , "res_path.path." )
if key.endswith(".w" ):
UpperCamelCase__ = rreplace(__A , ".w" , ".weight" , 1 )
if key.endswith(".b" ):
UpperCamelCase__ = rreplace(__A , ".b" , ".bias" , 1 )
UpperCamelCase__ = value.float()
return upgrade
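# e.g. a checkpoint key "group_1.res_path.0.w" matches all three rewrite rules
# and comes out as "group_1.group.res_path.path.0.weight".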
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A=None , __A=True ) -> Optional[int]:
'''simple docstring'''
from dall_e import Encoder
UpperCamelCase__ = Encoder()
if os.path.exists(__A ):
UpperCamelCase__ = torch.load(__A )
else:
UpperCamelCase__ = torch.hub.load_state_dict_from_url(__A )
if isinstance(__A , __A ):
UpperCamelCase__ = ckpt.state_dict()
encoder.load_state_dict(__A )
if config_path is not None:
UpperCamelCase__ = FlavaImageCodebookConfig.from_pretrained(__A )
else:
UpperCamelCase__ = FlavaImageCodebookConfig()
UpperCamelCase__ = FlavaImageCodebook(__A ).eval()
UpperCamelCase__ = encoder.state_dict()
UpperCamelCase__ = upgrade_state_dict(__A )
hf_model.load_state_dict(__A )
UpperCamelCase__ = hf_model.state_dict()
UpperCamelCase__ = count_parameters(__A )
UpperCamelCase__ = count_parameters(__A )
assert torch.allclose(__A , __A , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(__A )
else:
return hf_state_dict
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a__ : Any = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
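# Example invocation (script file name and all paths are placeholders):
# python convert_dalle_codebook.py \
#     --checkpoint_path ./encoder.pkl \
#     --pytorch_dump_folder_path ./flava-codebook \
#     --config_path ./config.json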
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
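# Worked example: 2 and 4 ohm resistors in parallel give
# 1 / (1/2 + 1/4) = 4/3 ≈ 1.33 ohms, always below the smallest resistor.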
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__A )
index += 1
return sum_r
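# Worked example: the same 2 and 4 ohm resistors in series simply sum to 6 ohms.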
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowercase: int = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = ["pixel_values"]
def __init__(self , lowerCamelCase_ = True , lowerCamelCase_ = 32 , lowerCamelCase_=PILImageResampling.BILINEAR , lowerCamelCase_ = True , **lowerCamelCase_ , ):
"""simple docstring"""
a = do_resize
a = do_rescale
a = size_divisor
a = resample
super().__init__(**lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ ):
"""simple docstring"""
a , a = get_image_size(lowerCamelCase_ )
# Rounds the height and width down to the closest multiple of size_divisor
a = height // size_divisor * size_divisor
a = width // size_divisor * size_divisor
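# e.g. with size_divisor=32 a 481x641 input becomes 480x640, so every
# spatial dimension stays an exact multiple of 32 for the model's strides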
a = resize(lowerCamelCase_ , (new_h, new_w) , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
return image
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ ):
"""simple docstring"""
return rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ):
"""simple docstring"""
a = do_resize if do_resize is not None else self.do_resize
a = do_rescale if do_rescale is not None else self.do_rescale
a = size_divisor if size_divisor is not None else self.size_divisor
a = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
a = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
a = [to_numpy_array(lowerCamelCase_ ) for img in images]
if do_resize:
a = [self.resize(lowerCamelCase_ , size_divisor=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_rescale:
a = [self.rescale(lowerCamelCase_ , scale=1 / 255 ) for image in images]
a = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
| 71 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ (self ):
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = LlamaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = True
a = LlamaModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = LlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = True
a = True
a = LlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ , )
a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["hidden_states"][0]
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.prepare_config_and_inputs()
( (a) , (a) , (a) , (a) , (a) , (a) , (a) , ) = config_and_inputs
a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__A = (LlamaForCausalLM,) if is_torch_available() else ()
__A = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = False
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = LlamaModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(lowerCamelCase_ )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "single_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(lowerCamelCase_ )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "multi_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(lowerCamelCase_ )
a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ids_tensor([1, 10] , config.vocab_size )
a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = LlamaModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
a = original_model(lowerCamelCase_ ).last_hidden_state
a = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = {"type": scaling_type, "factor": 10.0}
a = LlamaModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
a = scaled_model(lowerCamelCase_ ).last_hidden_state
a = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
a = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
a = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
a = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
a = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
a = model(torch.tensor(lowerCamelCase_ ) )
a = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
a = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a = "Simply put, the theory of relativity states that "
a = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
a = tokenizer.encode(lowerCamelCase_ , return_tensors="pt" )
a = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowerCamelCase_ )
# greedy generation outputs
a = model.generate(lowerCamelCase_ , max_new_tokens=64 , top_p=lowerCamelCase_ , temperature=1 , do_sample=lowerCamelCase_ )
a = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
| 71 | 1 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = 'roberta'
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = 'transformer'
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = F"""{prefix}.embeddings.{w}.weight"""
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = F"""{prefix}.embeddings.LayerNorm.{w}"""
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
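# The teacher layers selected below ([0, 2, 4, 7, 9, 11]) are copied into
# consecutive student layers 0..5, turning a 12-layer teacher into a 6-layer
# student; std_idx tracks the destination student layer.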
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
a_ = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[F"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[F"""lm_head.dense.{w}"""]
a_ = state_dict[F"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[F"""{prefix}.ln_f.{w}"""]
a_ = state_dict['lm_head.weight']
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 175 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
a_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _lowercase ( datasets.BuilderConfig ):
lowercase = None
def __lowercase ( lowerCamelCase : "pyspark.sql.DataFrame" , lowerCamelCase : List[int] , ):
import pyspark
def generate_fn():
UpperCamelCase_ : Dict = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
UpperCamelCase_ : Tuple = df_with_partition_id.select('*' ).where(F"part_id = {partition_id}" ).drop('part_id' )
UpperCamelCase_ : Union[str, Any] = partition_df.collect()
UpperCamelCase_ : Any = 0
for row in rows:
yield F"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
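# generate_fn yields ("<partition_id>_<row_id>", row_dict) pairs, e.g.
# ("3_0", {...}) for the first row of partition 3; iterating in the supplied
# partition_order is what makes shuffling a matter of permuting partition ids.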
class _lowercase ( _BaseExamplesIterable ):
def __init__( self : Optional[int] , snake_case : "pyspark.sql.DataFrame" , snake_case : Tuple=None , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Dict = df
UpperCamelCase_ : int = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCamelCase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Optional[int] ) -> Any:
"""simple docstring"""
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : np.random.Generator ) -> "SparkExamplesIterable":
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(snake_case )
return SparkExamplesIterable(self.df , partition_order=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : int , snake_case : int ) -> "SparkExamplesIterable":
"""simple docstring"""
UpperCamelCase_ : Tuple = self.split_shard_indices_by_worker(snake_case , snake_case )
return SparkExamplesIterable(self.df , partition_order=snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
"""simple docstring"""
return len(self.partition_order )
class _lowercase ( datasets.DatasetBuilder ):
lowercase = SparkConfig
def __init__( self : List[Any] , snake_case : "pyspark.sql.DataFrame" , snake_case : str = None , snake_case : str = None , **snake_case : Optional[Any] , ) -> List[str]:
"""simple docstring"""
import pyspark
UpperCamelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCamelCase_ : str = df
UpperCamelCase_ : Tuple = working_dir
super().__init__(
cache_dir=snake_case , config_name=str(self.df.semanticHash() ) , **snake_case , )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
"""simple docstring"""
def create_cache_and_write_probe(snake_case : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=snake_case )
UpperCamelCase_ : Tuple = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(snake_case , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase_ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(snake_case ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : datasets.download.download_manager.DownloadManager ) -> Optional[int]:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
import pyspark
def get_arrow_batch_size(snake_case : Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
UpperCamelCase_ : List[str] = self.df.count()
UpperCamelCase_ : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase_ : str = (
self.df.limit(snake_case )
.repartition(1 )
.mapInArrow(snake_case , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase_ : Optional[int] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase_ : Optional[Any] = min(snake_case , int(approx_total_size / max_shard_size ) )
UpperCamelCase_ : int = self.df.repartition(snake_case )
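# e.g. ~1 KiB per row over 1,000,000 rows estimates ~1 GiB in total; with a
# 500 MiB max_shard_size the dataframe is repartitioned into 2 partitions,
# each of which later becomes at least one shard.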
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str , snake_case : str , snake_case : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
"""simple docstring"""
import pyspark
UpperCamelCase_ : List[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter
UpperCamelCase_ : List[str] = os.path.join(self._working_dir , os.path.basename(snake_case ) ) if self._working_dir else fpath
UpperCamelCase_ : Union[str, Any] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase_ : Union[str, Any] = self.config.features
UpperCamelCase_ : Any = self._writer_batch_size
UpperCamelCase_ : Dict = self._fs.storage_options
def write_arrow(snake_case : List[str] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase_ : Any = pyspark.TaskContext().taskAttemptId()
UpperCamelCase_ : str = next(snake_case , snake_case )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
UpperCamelCase_ : Any = 0
UpperCamelCase_ : Optional[Any] = writer_class(
features=snake_case , path=working_fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , writer_batch_size=snake_case , storage_options=snake_case , embed_local_files=snake_case , )
UpperCamelCase_ : str = pa.Table.from_batches([first_batch] )
writer.write_table(snake_case )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase_, UpperCamelCase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
UpperCamelCase_ : Union[str, Any] = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , writer_batch_size=snake_case , storage_options=snake_case , embed_local_files=snake_case , )
UpperCamelCase_ : Optional[Any] = pa.Table.from_batches([batch] )
writer.write_table(snake_case )
if writer._num_bytes > 0:
UpperCamelCase_, UpperCamelCase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(snake_case ) ):
UpperCamelCase_ : Dict = os.path.join(os.path.dirname(snake_case ) , os.path.basename(snake_case ) )
shutil.move(snake_case , snake_case )
UpperCamelCase_ : int = (
self.df.mapInArrow(snake_case , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : "datasets.SplitGenerator" , snake_case : str = "arrow" , snake_case : Optional[Union[str, int]] = None , snake_case : Optional[int] = None , **snake_case : Any , ) -> int:
"""simple docstring"""
self._validate_cache_dir()
UpperCamelCase_ : Optional[int] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case )
UpperCamelCase_ : List[str] = not is_remote_filesystem(self._fs )
UpperCamelCase_ : List[Any] = os.path.join if is_local else posixpath.join
UpperCamelCase_ : Optional[int] = '-TTTTT-SSSSS-of-NNNNN'
UpperCamelCase_ : Dict = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
UpperCamelCase_ : int = path_join(self._output_dir , snake_case )
UpperCamelCase_ : int = 0
UpperCamelCase_ : Optional[int] = 0
UpperCamelCase_ : Union[str, Any] = 0
UpperCamelCase_ : Optional[Any] = []
UpperCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(snake_case , snake_case , snake_case ):
( (UpperCamelCase_) , (UpperCamelCase_) , (UpperCamelCase_) , (UpperCamelCase_) , ) : Optional[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(snake_case )
UpperCamelCase_ : Optional[Any] = total_num_examples
UpperCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards." )
if total_shards > 1:
UpperCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase_ : int = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
snake_case : int , snake_case : int , snake_case : int , ):
rename(
snake_case , fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , fpath.replace('TTTTT-SSSSS' , f"{global_shard_id:05d}" ).replace('NNNNN' , f"{total_shards:05d}" ) , )
UpperCamelCase_ : Any = []
UpperCamelCase_ : Optional[int] = 0
for i in range(len(snake_case ) ):
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(snake_case ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(snake_case , len(snake_case ) ).map(lambda snake_case : _rename_shard(*snake_case ) ).collect()
else:
# don't use any pattern
UpperCamelCase_ : Tuple = 0
UpperCamelCase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , fpath.replace(snake_case , '' ) , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 175 | 1 |
lowercase__ : List[Any] = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowercase__ : str = {value: key for key, value in encode_dict.items()}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> str:
a = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces")
return encoded
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> str:
if set(__UpperCamelCase) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces")
a = ""
for word in coded.split():
while len(__UpperCamelCase) != 0:
decoded += decode_dict[word[:5]]
a = word[5:]
decoded += " "
return decoded.strip()
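# Example round trip: encode("ab") == "AAAAAAAAAB" and
# decode("AAAAA AAAAB") == "a b".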
if __name__ == "__main__":
from doctest import testmod
testmod()
| 358 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCamelCase__ ) , """Tatoeba directory does not exist.""" )
class a__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A )
@slow
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=A )
assert mmeta["long_pair"] == "heb-eng"
| 180 | 0 |
from math import sqrt
def lowerCAmelCase_ ( _lowercase : Tuple) -> int:
"""simple docstring"""
a__ : str = 0
for i in range(1 , int(sqrt(_lowercase) + 1)):
if n % i == 0 and i != sqrt(_lowercase):
total += i + n // i
elif i == sqrt(_lowercase):
total += i
return total - n
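# e.g. sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so the
# amicable pair (220, 284) contributes both members to the total below.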
def lowerCAmelCase_ ( _lowercase : Optional[Any] = 1_0000) -> int:
"""simple docstring"""
a__ : Optional[int] = sum(
i
for i in range(1 , _lowercase)
if sum_of_divisors(sum_of_divisors(_lowercase)) == i and sum_of_divisors(_lowercase) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 170 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A = logging.get_logger(__name__)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__(self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase__ : List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
UpperCAmelCase__ : str = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
UpperCAmelCase__ : int = do_resize
UpperCAmelCase__ : Any = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : str = do_center_crop
UpperCAmelCase__ : Dict = crop_size
UpperCAmelCase__ : List[str] = do_flip_channel_order
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(_lowerCamelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
return flip_channel_order(_lowerCamelCase , data_format=_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : List[str] = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Tuple = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : str = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
UpperCAmelCase__ : List[str] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCAmelCase__ : Tuple = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Optional[Any] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[Any] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCAmelCase__ : Any = [self.flip_channel_order(image=_lowerCamelCase ) for image in images]
UpperCAmelCase__ : int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCAmelCase__ : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = target_sizes.numpy()
UpperCAmelCase__ : Tuple = []
for idx in range(len(_lowerCamelCase ) ):
UpperCAmelCase__ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_lowerCamelCase )
UpperCAmelCase__ : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
UpperCAmelCase__ : str = logits.argmax(dim=1 )
UpperCAmelCase__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 171 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowerCamelCase : Tuple = logging.getLogger(__name__)
@dataclass
class A__ :
_UpperCAmelCase :str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_UpperCAmelCase :Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCAmelCase :Optional[str] = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
_UpperCAmelCase :Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_UpperCAmelCase :bool = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase :Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class A__ :
_UpperCAmelCase :str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
_UpperCAmelCase :Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
_UpperCAmelCase :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCAmelCase :bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def A_ ( ) -> Optional[int]:
UpperCamelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
UpperCamelCase : Tuple = import_module("tasks" )
try:
UpperCamelCase : Union[str, Any] = getattr(_SCREAMING_SNAKE_CASE , model_args.task_type )
UpperCamelCase : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
UpperCamelCase : Optional[int] = token_classification_task.get_labels(data_args.labels )
UpperCamelCase : Dict[int, str] = dict(enumerate(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
UpperCamelCase : Dict = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCamelCase : Optional[Any] = (
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCamelCase : Tuple = (
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions , axis=2 )
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size )]
preds_list = [[] for _ in range(batch_size )]
for i in range(batch_size ):
for j in range(seq_len ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(p ) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(out_label_list , preds_list ),
"precision": precision_score(out_label_list , preds_list ),
"recall": recall_score(out_label_list , preds_list ),
"f1": f1_score(out_label_list , preds_list ),
}
# Data collator
data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(output_eval_file , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info("  %s = %s" , key , value )
writer.write("%s = %s\n" % (key, value) )
results.update(result )
# Predict
if training_args.do_predict:
test_dataset = TokenClassificationDataset(
token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
predictions, label_ids, metrics = trainer.predict(test_dataset )
preds_list, _ = align_predictions(predictions , label_ids )
output_test_results_file = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(output_test_results_file , "w" ) as writer:
for key, value in metrics.items():
logger.info("  %s = %s" , key , value )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(output_test_predictions_file , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 351 |
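A minimal, self-contained sketch of the prediction-alignment step in the script above. The toy shapes and the two-label label_map are made up for illustration; only the ignore-index convention (-100, i.e. nn.CrossEntropyLoss().ignore_index) is taken from the script.
import numpy as np

label_map = {0: "O", 1: "B-PER"}  # hypothetical two-label map
predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq_len=3, num_labels=2)
label_ids = np.array([[0, 1, -100]])  # -100 marks padding / special tokens to skip

preds = np.argmax(predictions, axis=2)
preds_list = [
    [label_map[p] for p, l in zip(pred_row, label_row) if l != -100]
    for pred_row, label_row in zip(preds, label_ids)
]
print(preds_list)  # [['O', 'B-PER']] -- the ignored third position is dropped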
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ):
hf_model.apply_weight_norm()
hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
hf_model.upsampler[i].weight_g.data = checkpoint[F"""upsamples.{i}.1.weight_g"""]
hf_model.upsampler[i].weight_v.data = checkpoint[F"""upsamples.{i}.1.weight_v"""]
hf_model.upsampler[i].bias.data = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
if config_path is not None:
config = SpeechT5HifiGanConfig.from_pretrained(config_path )
else:
config = SpeechT5HifiGanConfig()
model = SpeechT5HifiGan(config )
orig_checkpoint = torch.load(checkpoint_path )
load_weights(orig_checkpoint["model"]["generator"] , model , config )
stats = np.load(stats_path )
mean = stats[0].reshape(-1 )
scale = stats[1].reshape(-1 )
model.mean = torch.from_numpy(mean ).float()
model.scale = torch.from_numpy(scale ).float()
model.save_pretrained(pytorch_dump_folder_path )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 140 | 0 |
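Why the converter brackets the copy with apply_weight_norm/remove_weight_norm: the original checkpoint stores the weight-norm parametrization (weight_g magnitude, weight_v direction) rather than the fused weight. A hedged, minimal illustration in plain PyTorch; the layer and values here are illustrative, not the converter's API.
import torch
from torch import nn
from torch.nn.utils import weight_norm, remove_weight_norm

conv = nn.Conv1d(3, 3, kernel_size=3)
conv = weight_norm(conv)          # splits conv.weight into weight_g (magnitude) and weight_v (direction)
conv.weight_g.data.fill_(1.0)     # a checkpoint's values would be copied into these two tensors
conv.weight_v.data.normal_()
remove_weight_norm(conv)          # fuses weight_g / weight_v back into a single conv.weight
print(conv.weight.shape)          # torch.Size([3, 3, 3])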
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
@property
def dummy_uncond_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def dummy_vq_model( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(config )
def test_inference_uncond( self ):
'''simple docstring'''
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vae = self.dummy_vq_model
ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
ldm.to(torch_device )
ldm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = ldm(generator=generator , num_inference_steps=2 , output_type="numpy" ).images
generator = torch.manual_seed(0 )
image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type="numpy" , return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
tolerance = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
def test_inference_uncond( self ):
'''simple docstring'''
ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(torch_device )
ldm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = ldm(generator=generator , num_inference_steps=5 , output_type="numpy" ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
tolerance = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 68 |
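The determinism in these tests comes from re-seeding before each pipeline call so both calls replay the same random stream. A minimal sketch of that pattern outside the pipeline (shapes are arbitrary):
import torch

generator = torch.manual_seed(0)
a = torch.randn(2, 3, generator=generator)
generator = torch.manual_seed(0)   # re-seed to replay the identical random stream
b = torch.randn(2, 3, generator=generator)
assert torch.equal(a, b)           # identical draws, so output slices can be compared exactly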
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
bleu_data = json.load(f)
@require_torch
class FSMTBleuScoreTests( unittest.TestCase ):
"""simple docstring"""
def get_tokenizer( self , mname ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(mname )
def get_model( self , mname ):
'''simple docstring'''
model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def test_bleu_scores( self , pair , min_bleu_score ):
'''simple docstring'''
mname = F'facebook/wmt19-{pair}'
tokenizer = self.get_tokenizer(mname )
model = self.get_model(mname )
src_sentences = bleu_data[pair]["src"]
tgt_sentences = bleu_data[pair]["tgt"]
batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
outputs = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
decoded_sentences = tokenizer.batch_decode(
outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
scores = calculate_bleu(decoded_sentences , tgt_sentences )
print(scores )
self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 68 | 1 |
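calculate_bleu is imported from the test suite's local utils module, which is not shown here. A hedged stand-in consistent with how such helpers typically wrap sacrebleu; the dependency and the rounding are assumptions, not the suite's actual implementation.
import sacrebleu

def calculate_bleu(output_lns, refs_lns):
    # corpus_bleu takes a list of hypothesis strings and a list of reference streams
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}

print(calculate_bleu(["the cat sat"], ["the cat sat"]))  # {'bleu': 100.0}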
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
_keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 50257 , n_positions: int = 1024 , n_embd: int = 768 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1E-5 , initializer_range: float = 0.02 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ):
super().__init__()
self.prefix_length = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
f" `n_embd`: {n_embd} are not equal." )
self.prefix_inner_dim = prefix_inner_dim
self.prefix_hidden_dim = prefix_hidden_dim
self.encode_prefix = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
self.decode_prefix = (
nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
)
gpt_config = GPT2Config(
vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
self.transformer = GPT2LMHeadModel(gpt_config )
def forward( self , input_ids: torch.Tensor , prefix_embeds: torch.Tensor , attention_mask: Optional[torch.Tensor] = None , labels: Optional[torch.Tensor] = None , ):
embedding_text = self.transformer.transformer.wte(input_ids )
hidden = self.encode_prefix(prefix_embeds )
prefix_embeds = self.decode_prefix(hidden )
embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
labels = torch.cat((dummy_token, input_ids) , dim=1 )
out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def get_dummy_token( self , batch_size: int , device: torch.device ):
return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
def encode( self , prefix ):
return self.encode_prefix(prefix )
@torch.no_grad()
def generate_captions( self , features , eos_token_id , device ):
features = torch.split(features , 1 , dim=0 )
generated_tokens = []
generated_seq_lengths = []
for feature in features:
feature = self.decode_prefix(feature.to(device ) ) # back to the clip feature
# Only support beam search for now
output_tokens, seq_lengths = self.generate_beam(
input_embeds=feature , device=device , eos_token_id=eos_token_id )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
generated_tokens = torch.stack(generated_tokens )
generated_seq_lengths = torch.stack(generated_seq_lengths )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size: int = 5 , entry_length: int = 67 , temperature: float = 1.0 , eos_token_id: Optional[int] = None , ):
stop_token_index = eos_token_id
tokens = None
scores = None
seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
if input_embeds is not None:
generated = input_embeds
else:
generated = self.transformer.transformer.wte(input_ids )
for i in range(entry_length ):
outputs = self.transformer(inputs_embeds=generated )
logits = outputs.logits
logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
logits = logits.softmax(-1 ).log()
if scores is None:
scores, next_tokens = logits.topk(beam_size , -1 )
generated = generated.expand(beam_size , *generated.shape[1:] )
next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
tokens = next_tokens
else:
tokens = tokens.expand(beam_size , *tokens.shape[1:] )
tokens = torch.cat((tokens, next_tokens) , dim=1 )
else:
logits[is_stopped] = -float(np.inf )
logits[is_stopped, 0] = 0
scores_sum = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
scores_sum_average = scores_sum / seq_lengths[:, None]
scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
next_tokens_source = next_tokens // scores_sum.shape[1]
seq_lengths = seq_lengths[next_tokens_source]
next_tokens = next_tokens % scores_sum.shape[1]
next_tokens = next_tokens.unsqueeze(1 )
tokens = tokens[next_tokens_source]
tokens = torch.cat((tokens, next_tokens) , dim=1 )
generated = generated[next_tokens_source]
scores = scores_sum_average * seq_lengths
is_stopped = is_stopped[next_tokens_source]
next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
generated = torch.cat((generated, next_token_embed) , dim=1 )
is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
if is_stopped.all():
break
scores = scores / seq_lengths
order = scores.argsort(descending=True )
# tokens tensors are already padded to max_seq_length
output_texts = [tokens[i] for i in order]
output_texts = torch.stack(output_texts , dim=0 )
seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 323 |
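The ranking step in generate_beam divides accumulated log-probabilities by sequence length, so a beam is judged by average per-token log-probability rather than the raw sum, which would penalize longer hypotheses. A tiny numeric sketch with made-up values:
import torch

scores_sum = torch.tensor([-4.0, -4.5])   # total log-prob of two candidate beams
seq_lengths = torch.tensor([2.0, 3.0])    # tokens generated so far per beam
avg = scores_sum / seq_lengths
print(avg)  # tensor([-2.0000, -1.5000]) -- the longer beam wins after normalization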
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
size = size if size is not None else {'shortest_edge': 20}
crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
def setUp( self ):
self.image_processor_tester = MobileNetV1ImageProcessingTester(self )
@property
def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , 'do_resize' ) )
self.assertTrue(hasattr(image_processing , 'size' ) )
self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
self.assertTrue(hasattr(image_processing , 'crop_size' ) )
def test_image_processor_from_dict_with_kwargs( self ):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def test_batch_feature( self ):
pass
def test_call_pil( self ):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def test_call_numpy( self ):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def test_call_pytorch( self ):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323 | 1 |
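A hedged usage sketch of the processor these tests exercise; the size/crop values mirror the test defaults above, and the shape comment assumes the usual resize-then-center-crop behavior.
import numpy as np
from PIL import Image
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) -- resized, then center-cropped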
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
_import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
_import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
_import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
from .configuration_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecTextConfig,
Data2VecTextOnnxConfig,
)
from .configuration_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecVisionConfig,
Data2VecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_data2vec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
)
from .modeling_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
)
from .modeling_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecVisionForImageClassification,
Data2VecVisionForMaskedImageModeling,
Data2VecVisionForSemanticSegmentation,
Data2VecVisionModel,
Data2VecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_data2vec_vision import (
TFData2VecVisionForImageClassification,
TFData2VecVisionForSemanticSegmentation,
TFData2VecVisionModel,
TFData2VecVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 107 |
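The _LazyModule indirection defers importing the heavy submodules until an attribute is first accessed. A minimal, generic sketch of the same idea using module-level __getattr__ (PEP 562); this is an illustration, not transformers' actual implementation.
import importlib

_import_structure = {"json": ["dumps"]}  # maps module name -> names it exports

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)  # imported only on first access
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")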
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig( PretrainedConfig ):
"""simple docstring"""
model_type = 'camembert'
def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest( unittest.TestCase ):
'''simple docstring'''
def test_input_types( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
cset = [[1, 2, 4], [1, 2, 3, 4]]
dc = DisjunctiveConstraint(cset )
self.assertTrue(isinstance(dc.token_ids , list ) )
with self.assertRaises(ValueError ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(ValueError ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def test_check_illegal_input( self ):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
cset = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(ValueError ):
DisjunctiveConstraint(cset ) # fails here
def test_example_progression( self ):
cset = [[1, 2, 3], [1, 2, 4]]
dc = DisjunctiveConstraint(cset )
stepped, completed, reset = dc.update(1 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped, completed, reset = dc.update(2 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped, completed, reset = dc.update(3 )
desired = stepped is True and completed is True and reset is False
self.assertTrue(desired )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def test_example_progression_unequal_three_mid_and_reset( self ):
cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
dc = DisjunctiveConstraint(cset )
stepped, completed, reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped, completed, reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped, completed, reset = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
stepped, completed, reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
stepped, completed, reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
stepped, completed, reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
stepped, completed, reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 68 |
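In user code these constraints are usually driven through generate: a nested list passed as force_words_ids is interpreted disjunctively, i.e. any one of the tokenized variants must appear. A hedged sketch (model choice and phrasing are illustrative; constrained decoding requires num_beams > 1):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# one disjunctive constraint: the output must contain "coast" OR "coastline"
force_words_ids = [tokenizer(["coast", "coastline"], add_special_tokens=False).input_ids]
inputs = tokenizer("summarize: the beach was empty", return_tensors="pt")
out = model.generate(**inputs, force_words_ids=force_words_ids, num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))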
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
'''simple docstring'''
task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
data_dir: str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
max_seq_length: int = field(
default=128 ,metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} ,)
overwrite_cache: bool = field(
default=False ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __post_init__( self ) -> None:
self.task_name = self.task_name.lower()
class Split( Enum ):
'''simple docstring'''
train = '''train'''
dev = '''dev'''
test = '''test'''
class GlueDataset( Dataset ):
'''simple docstring'''
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ) -> None:
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , FutureWarning , )
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '''.lock'''
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir )
else:
examples = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
start = time.time()
torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , i ) -> InputFeatures:
return self.features[i]
def get_labels( self ) -> List[str]:
return self.label_list
| 68 | 1 |
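A hedged construction sketch for the dataset above, using the names as restored here (GlueDataTrainingArguments, GlueDataset, Split); the data path is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
print(len(train_dataset), train_dataset[0])  # InputFeatures with input_ids / attention_mask / label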
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 288 |
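Once pushed, the stub loads like any other checkpoint; tests use it only to exercise the plumbing, never for output quality. A hedged usage sketch mirroring the test call in the script:
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
batch = tokenizer(["Making tiny model"], return_tensors="pt")
print(model(**batch).logits.shape)  # d_model=4, so this runs in milliseconds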
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''
def quote_of_the_day():
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes():
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
response = random_quotes()
pprint.pprint(response)
| 195 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeq2SeqLM,
'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer( pl.LightningModule ):
"""simple docstring"""
def __init__( self , hparams , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(hparams )
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir )
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
else:
self.config = config
extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , p , None ):
assert hasattr(self.config , p ), f'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , p , getattr(self.hparams , p ) )
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
else:
self.tokenizer = tokenizer
self.model_type = MODEL_MODES[mode]
if model is None:
self.model = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
else:
self.model = model
def load_hf_checkpoint( self , *args , **kwargs ):
self.model = self.model_type.from_pretrained(*args , **kwargs )
def get_lr_scheduler( self ):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
scheduler = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def configure_optimizers( self ):
model = self.model
no_decay = ['''bias''', '''LayerNorm.weight''']
optimizer_grouped_parameters = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
optimizer = Adafactor(
optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
else:
optimizer = AdamW(
optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return [optimizer], [scheduler]
def test_step( self , batch , batch_nb ):
return self.validation_step(batch , batch_nb )
def test_epoch_end( self , outputs ):
return self.validation_end(outputs )
def total_steps( self ) -> int:
num_devices = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def setup( self , stage ):
if stage == "test":
self.dataset_size = len(self.test_dataloader().dataset )
else:
self.train_loader = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=True )
self.dataset_size = len(self.train_dataloader().dataset )
def get_dataloader( self , type_path , batch_size , shuffle=False ):
raise NotImplementedError('''You must implement this for your task''' )
def train_dataloader( self ):
return self.train_loader
def val_dataloader( self ):
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=False )
def test_dataloader( self ):
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=False )
def _feature_file( self , mode ):
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
mode , list(filter(None , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def on_save_checkpoint( self , checkpoint ) -> None:
save_path = self.output_dir.joinpath('''best_tfmr''' )
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path )
self.tokenizer.save_pretrained(save_path )
@staticmethod
def add_model_specific_args( parser , root_dir ):
parser.add_argument(
'''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(root_dir ).parent / '''test_run''' / '''cache''' ) , type=str , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=float , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=float , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=float , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=float , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=float , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=float , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=int , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=int )
parser.add_argument('''--train_batch_size''' , default=32 , type=int )
parser.add_argument('''--eval_batch_size''' , default=32 , type=int )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class InitCallback( pl.Callback ):
"""simple docstring"""
def on_sanity_check_start( self , trainer , pl_module ):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback( pl.Callback ):
"""simple docstring"""
def on_after_backward( self , trainer , pl_module ):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(name )
class LoggingCallback( pl.Callback ):
"""simple docstring"""
def on_batch_end( self , trainer , pl_module ):
lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
lrs = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(lrs )
def on_validation_end( self , trainer , pl_module ):
rank_zero_info('''***** Validation results *****''' )
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
def on_test_end( self , trainer , pl_module ):
rank_zero_info('''***** Test results *****''' )
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(output_test_results_file , '''w''' ) as writer:
for key in sorted(metrics ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
def add_generic_args( parser , root_dir ) -> None:
parser.add_argument(
'''--output_dir''' , default=str(Path(root_dir ).parent / '''test_run''' / '''model_checkpoints''' ) , type=str , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=str , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=int )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=float , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=int , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(root_dir ).parent / '''test_run''' / '''dummy-train-data''' ) , type=str , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def generic_train( model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
pl.seed_everything(args.seed )
# init model
odir = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=True )
# add custom checkpoints
if checkpoint_callback is None:
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(early_stopping_callback )
if logging_callback is None:
logging_callback = LoggingCallback()
train_params = {}
if args.fp16:
train_params['''precision'''] = 16
if args.gpus > 1:
train_params['''accelerator'''] = '''auto'''
train_params['''strategy'''] = '''ddp'''
train_params['''accumulate_grad_batches'''] = args.accumulate_grad_batches
train_params['''profiler'''] = None
train_params['''devices'''] = '''auto'''
trainer = pl.Trainer.from_argparse_args(
args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
if args.do_train:
trainer.fit(model )
else:
print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
| 366 |
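A hedged sketch of how the example scripts subclass this base module (here under its upstream name BaseTransformer, as restored above): implement get_dataloader plus the usual Lightning steps. Everything besides the base-class hooks is illustrative toy data.
import torch
from torch.utils.data import DataLoader, TensorDataset

class SequenceClassifier(BaseTransformer):  # BaseTransformer is the Lightning module defined above
    mode = "sequence-classification"

    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode=self.mode)

    def training_step(self, batch, batch_idx):
        outputs = self.model(input_ids=batch[0], labels=batch[1])
        return {"loss": outputs.loss}

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        # hypothetical toy data; real examples read from self.hparams.data_dir
        dataset = TensorDataset(torch.randint(0, 100, (8, 16)), torch.randint(0, 2, (8,)))
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)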
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "transfo-xl"
keys_to_ignore_at_inference = ["mems"]
attribute_map = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any , __lowerCamelCase : int=26_7735 , __lowerCamelCase : Any=[2_0000, 4_0000, 20_0000] , __lowerCamelCase : Dict=1024 , __lowerCamelCase : Optional[int]=1024 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Dict=4096 , __lowerCamelCase : int=4 , __lowerCamelCase : Dict=False , __lowerCamelCase : Tuple=18 , __lowerCamelCase : Optional[int]=1600 , __lowerCamelCase : str=1000 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : int=-1 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : int=True , __lowerCamelCase : str="normal" , __lowerCamelCase : List[str]=0.01 , __lowerCamelCase : Any=0.01 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : List[str]=1e-5 , __lowerCamelCase : Union[str, Any]=0 , **__lowerCamelCase : int , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = []
self.cutoffs.extend(__lowerCamelCase )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE__ = [False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE__ = [False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE__ = d_model
SCREAMING_SNAKE_CASE__ = d_embed
SCREAMING_SNAKE_CASE__ = d_head
SCREAMING_SNAKE_CASE__ = d_inner
SCREAMING_SNAKE_CASE__ = div_val
SCREAMING_SNAKE_CASE__ = pre_lnorm
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = mem_len
SCREAMING_SNAKE_CASE__ = same_length
SCREAMING_SNAKE_CASE__ = attn_type
SCREAMING_SNAKE_CASE__ = clamp_len
SCREAMING_SNAKE_CASE__ = sample_softmax
SCREAMING_SNAKE_CASE__ = adaptive
SCREAMING_SNAKE_CASE__ = dropout
SCREAMING_SNAKE_CASE__ = dropatt
SCREAMING_SNAKE_CASE__ = untie_r
SCREAMING_SNAKE_CASE__ = init
SCREAMING_SNAKE_CASE__ = init_range
SCREAMING_SNAKE_CASE__ = proj_init_std
SCREAMING_SNAKE_CASE__ = init_std
SCREAMING_SNAKE_CASE__ = layer_norm_epsilon
super().__init__(eos_token_id=__lowerCamelCase , **__lowerCamelCase )
@property
def lowercase_ ( self : str ) -> Dict:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowercase_ ( self : List[str] , __lowerCamelCase : Any ) -> List[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 218 | 0 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """simple docstring"""
    # normal gradient to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 307 |
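A note on the reflection step in next_point above, reconstructed from the code itself rather than an external reference: the ellipse 4x^2 + y^2 = 100 has normal slope n = y/(4x) at (x, y), and reflecting an incoming line of slope m about that normal follows from the tangent double-angle identities:

\[
n = \frac{y}{4x}, \qquad
\sin 2\theta = \frac{2n}{1+n^{2}}, \qquad
\cos 2\theta = \frac{1-n^{2}}{1+n^{2}}, \qquad
m' = \frac{\sin 2\theta - m \cos 2\theta}{\cos 2\theta + m \sin 2\theta}
\]

This is exactly the sa/ca arithmetic performed before the quadratic is solved for the next intersection point.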
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 307 | 1 |
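The formatter above is what backs Dataset.set_format("torch") in the datasets library. A minimal usage sketch (the toy columns below are illustrative, not from the source):

from datasets import Dataset

# Hypothetical toy data; any numeric columns work.
ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds.set_format("torch")  # rows now come back as torch tensors
row = ds[0]             # {"x": tensor([1., 2.]), "y": tensor(0)}
print(type(row["x"]))   # <class 'torch.Tensor'>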
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    '''simple docstring'''
    full_path = str(Path(text_file).resolve())
    assert cached_path(full_path) == text_file
    # relative path
    relative_path = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(relative_path) == text_file


def test_cached_path_missing_local(tmp_path):
    '''simple docstring'''
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    '''simple docstring'''
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 368 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 110 | 0 |
"""simple docstring"""
from string import ascii_uppercase
_a = {str(ord(c) - 55): c for c in ascii_uppercase}
def __a ( __lowerCamelCase, __lowerCamelCase ):
if isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
if isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
UpperCAmelCase_ : Optional[Any] = ""
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : str = 0
while div != 1:
UpperCAmelCase_ , UpperCAmelCase_ : str = divmod(__lowerCamelCase, __lowerCamelCase )
if base >= 11 and 9 < mod < 36:
UpperCAmelCase_ : Tuple = ALPHABET_VALUES[str(__lowerCamelCase )]
else:
UpperCAmelCase_ : int = str(__lowerCamelCase )
new_value += actual_value
UpperCAmelCase_ : Any = num // base
UpperCAmelCase_ : Optional[int] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(__lowerCamelCase )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 61 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 240 | 0 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 371 |
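A hand-checked trace of the test case above, worked from the algorithm itself: with all four jobs arriving at t=0, shortest-job-first runs P1(2), P3(3), P2(5), P4(7), giving waiting times 0, 5, 2 and 10:

assert calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4) == [0, 5, 2, 10]
assert calculate_turnaroundtime([2, 5, 3, 7], 4, [0, 5, 2, 10]) == [2, 10, 5, 17]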
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        '''simple docstring'''
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 33 | 0 |
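A hedged usage sketch of the metaclass pattern above (the Menu class and its key bindings are hypothetical; register rebuilds the class through KeyHandler so that decorated methods are collected into key_handler):

@register
class Menu:
    current_selection = None

    @mark(ord("j"))
    def move_down(cls):
        print("down")

    @mark_multiple(ord("k"), ord("K"))
    def move_up(cls):
        print("up")

# Menu.handle_input() reads one character via get_character() and, because
# handle_input converts printable characters with ord(), dispatches on
# ord("j") / ord("k") / ord("K").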
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 160 |
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 160 | 1 |
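The dispatch rule implemented above is the textbook highest-response-ratio-next criterion: at every decision point the ready process maximizing

\[
RR = \frac{W + B}{B} = \frac{(t - A) + B}{B}
\]

is run to completion, where t is the current time, A the arrival time, B the burst time, and W = t - A the time spent waiting; this is the temp expression computed in the selection loop.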
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        """simple docstring"""
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 311 |
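The ratio-based comparison used in the integration test above generalizes to a small helper; this is an illustrative sketch, not part of the test suite:

import torch

def assert_relative_close(expected: torch.Tensor, actual: torch.Tensor, rtol: float = 1e-3) -> None:
    # Compare element-wise ratios to 1 rather than absolute differences,
    # which stays meaningful when magnitudes span 1e0 to 1e8.
    ratio = expected / actual
    assert bool(torch.all(ratio >= 1 - rtol) and torch.all(ratio <= 1 + rtol))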
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        """simple docstring"""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 311 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 339 |
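The _LazyModule assignment above defers the heavy torch imports until an attribute is first accessed. A minimal, independent sketch of the same idea (illustrative, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Map each exported name to its submodule and import on first access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)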
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    '''simple docstring'''
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    '''simple docstring'''
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1_024) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 339 | 1 |
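The renaming step above is pure string substitution over checkpoint key names, so it can be sanity-checked without downloading any weights. A minimal sketch, assuming `WHISPER_MAPPING` and `rename_keys` from the script above are in scope; the toy keys are hypothetical but follow OpenAI's Whisper naming scheme, and plain numbers stand in for weight tensors:

# Toy state dict with OpenAI-style key names; values are placeholders, not real tensors.
toy_state_dict = {
    "encoder.blocks.0.attn.query.weight": 0.0,
    "decoder.blocks.1.cross_attn.out.bias": 0.0,
    "decoder.ln.weight": 0.0,
}
renamed = rename_keys(toy_state_dict)
assert "encoder.layers.0.self_attn.q_proj.weight" in renamed
assert "decoder.layers.1.encoder_attn.out_proj.bias" in renamed
assert "decoder.layer_norm.weight" in renamed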
'''Convert OpenAI GPT checkpoint.'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an OpenAI GPT TensorFlow checkpoint to a PyTorch model."""
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 356 |
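After a successful conversion, the dump folder contains the weights under `WEIGHTS_NAME` ("pytorch_model.bin") and the configuration under `CONFIG_NAME` ("config.json"), so the model can be reloaded directly. A short sketch; the folder path is a hypothetical stand-in for whatever `--pytorch_dump_folder_path` pointed at:

import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel

dump_folder = "./openai-gpt-converted"  # hypothetical output folder from the script above
config = OpenAIGPTConfig.from_json_file(f"{dump_folder}/config.json")
model = OpenAIGPTModel(config)
state_dict = torch.load(f"{dump_folder}/pytorch_model.bin", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()  # ready for inference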
'''Ternary search over a sorted list, with a linear-scan fallback for small windows.'''
from __future__ import annotations
# This is the window size below which the search falls back to a linear scan.
# It is recommended to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left .. right] (inclusive); returns the index of target, or -1."""
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search of a sorted array; returns the index of target, or -1."""
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Small window: fall back to a linear scan.
            return lin_search(left, right, array, target)
        # Probe points at one third and two thirds of the current window;
        # computing them relative to `left` keeps them inside [left, right].
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of array[left .. right] (inclusive); returns the index of target, or -1."""
    if left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        # Same window-relative probe points as the iterative variant.
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print("Not found")
| 184 | 0 |
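Both search variants can also be exercised without the interactive prompt. A small sketch, assuming `ite_ternary_search` and `rec_ternary_search` from the snippet above are in scope; the sample list is arbitrary but long enough that the ternary splitting, not just the linear fallback, is actually reached:

data = list(range(0, 100, 3))  # 34 sorted values: 0, 3, 6, ..., 99
for needle in (0, 42, 99, 50):
    found_ite = ite_ternary_search(data, needle)
    found_rec = rec_ternary_search(0, len(data) - 1, data, needle)
    # Both variants agree: the element's index when present, -1 otherwise (50 is absent).
    print(needle, found_ite, found_rec)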